repo_name
string
path
string
copies
string
size
string
content
string
license
string
croniccorey/cronmod-kernel
arch/powerpc/math-emu/fmadd.c
13735
1100
#include <linux/types.h> #include <linux/errno.h> #include <asm/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fmadd(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
gpl-2.0
palmer-dabbelt/riscv-binutils-gdb
sim/ppc/cap.c
168
3177
/* This file is part of the program psim. Copyright (C) 1994-1995,1997, Andrew Cagney <cagney@highland.com.au> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. */ #ifndef _CAP_C_ #define _CAP_C_ #include "cap.h" typedef struct _cap_mapping cap_mapping; struct _cap_mapping { unsigned_cell external; void *internal; cap_mapping *next; }; struct _cap { int nr_mappings; cap_mapping *mappings; }; INLINE_CAP\ (cap *) cap_create(const char *key) { return ZALLOC(cap); } INLINE_CAP\ (void) cap_init(cap *db) { cap_mapping *current_map = db->mappings; if (current_map != NULL) { db->nr_mappings = db->mappings->external; /* verify that the mappings that were not removed are in sequence down to nr 1 */ while (current_map->next != NULL) { if (current_map->external != current_map->next->external + 1) error("cap: cap database possibly corrupt"); current_map = current_map->next; } ASSERT(current_map->next == NULL); if (current_map->external != 1) error("cap: cap database possibly currupt"); } else { db->nr_mappings = 0; } } INLINE_CAP\ (void *) cap_internal(cap *db, signed_cell external) { cap_mapping *current_map = db->mappings; while (current_map != NULL) { if (current_map->external == external) return current_map->internal; current_map = current_map->next; } return (void*)0; } INLINE_CAP\ (signed_cell) cap_external(cap *db, void *internal) { cap_mapping *current_map = db->mappings; while (current_map != NULL) { if 
(current_map->internal == internal) return current_map->external; current_map = current_map->next; } return 0; } INLINE_CAP\ (void) cap_add(cap *db, void *internal) { if (cap_external(db, internal) != 0) { error("cap: attempting to add an object already in the data base"); } else { /* insert at the front making things in decending order */ cap_mapping *new_map = ZALLOC(cap_mapping); new_map->next = db->mappings; new_map->internal = internal; db->nr_mappings += 1; new_map->external = db->nr_mappings; db->mappings = new_map; } } INLINE_CAP\ (void) cap_remove(cap *db, void *internal) { cap_mapping **current_map = &db->mappings; while (*current_map != NULL) { if ((*current_map)->internal == internal) { cap_mapping *delete = *current_map; *current_map = delete->next; free(delete); return; } current_map = &(*current_map)->next; } error("cap: attempt to remove nonexistant internal object"); } #endif
gpl-2.0
ajopanoor/mic_card_os
drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
168
39784
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _RTW_IOCTL_SET_C_ #include <osdep_service.h> #include <drv_types.h> #include <rtw_ioctl_set.h> #include <hal_intf.h> #include <usb_osintf.h> #include <usb_ops.h> extern void indicate_wx_scan_complete_event(struct adapter *padapter); #define IS_MAC_ADDRESS_BROADCAST(addr) \ (\ ((addr[0] == 0xff) && (addr[1] == 0xff) && \ (addr[2] == 0xff) && (addr[3] == 0xff) && \ (addr[4] == 0xff) && (addr[5] == 0xff)) ? 
true : false \ ) u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid) { u8 i; u8 ret = true; _func_enter_; if (ssid->SsidLength > 32) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid length >32\n")); ret = false; goto exit; } for (i = 0; i < ssid->SsidLength; i++) { /* wifi, printable ascii code must be supported */ if (!((ssid->Ssid[i] >= 0x20) && (ssid->Ssid[i] <= 0x7e))) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid has nonprintabl ascii\n")); ret = false; break; } } exit: _func_exit_; return ret; } u8 rtw_do_join(struct adapter *padapter) { unsigned long irqL; struct list_head *plist, *phead; u8 *pibss = NULL; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct __queue *queue = &(pmlmepriv->scanned_queue); u8 ret = _SUCCESS; _func_enter_; _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL); phead = get_list_head(queue); plist = get_next(phead); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n rtw_do_join: phead = %p; plist = %p\n\n\n", phead, plist)); pmlmepriv->cur_network.join_res = -2; set_fwstate(pmlmepriv, _FW_UNDER_LINKING); pmlmepriv->pscanned = plist; pmlmepriv->to_join = true; if (_rtw_queue_empty(queue)) { _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); /* when set_ssid/set_bssid for rtw_do_join(), but scanning queue is empty */ /* we try to issue sitesurvey firstly */ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic || pmlmepriv->to_roaming > 0) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_do_join(): site survey if scanned_queue is empty\n.")); /* submit site_survey_cmd */ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0); if (_SUCCESS != ret) { pmlmepriv->to_join = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_do_join(): site survey return error\n.")); } } else { pmlmepriv->to_join = false; ret = _FAIL; } goto exit; } else { int select_ret; _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL); 
select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv); if (select_ret == _SUCCESS) { pmlmepriv->to_join = false; _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT); } else { if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) { /* submit createbss_cmd to change to a ADHOC_MASTER */ /* pmlmepriv->lock has been acquired by caller... */ struct wlan_bssid_ex *pdev_network = &(padapter->registrypriv.dev_network); pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE; pibss = padapter->registrypriv.dev_network.MacAddress; _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); rtw_update_registrypriv_dev_network(padapter); rtw_generate_random_ibss(pibss); if (rtw_createbss_cmd(padapter) != _SUCCESS) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>do_goin: rtw_createbss_cmd status FAIL***\n ")); ret = false; goto exit; } pmlmepriv->to_join = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("***Error => rtw_select_and_join_from_scanned_queue FAIL under STA_Mode***\n ")); } else { /* can't associate ; reset under-linking */ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); /* when set_ssid/set_bssid for rtw_do_join(), but there are no desired bss in scanning queue */ /* we try to issue sitesurvey firstly */ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic || pmlmepriv->to_roaming > 0) { ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0); if (_SUCCESS != ret) { pmlmepriv->to_join = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("do_join(): site survey return error\n.")); } } else { ret = _FAIL; pmlmepriv->to_join = false; } } } } exit: _func_exit_; return ret; } u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid) { unsigned long irqL; u8 status = _SUCCESS; u32 cur_time = 0; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _func_enter_; DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid); if ((bssid[0] == 0x00 
&& bssid[1] == 0x00 && bssid[2] == 0x00 && bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) || (bssid[0] == 0xFF && bssid[1] == 0xFF && bssid[2] == 0xFF && bssid[3] == 0xFF && bssid[4] == 0xFF && bssid[5] == 0xFF)) { status = _FAIL; goto exit; } _enter_critical_bh(&pmlmepriv->lock, &irqL); DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv)); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) goto handle_tkip_countermeasure; else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) goto release_mlme_lock; if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n")); if (_rtw_memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) { if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false) goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */ } else { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set BSSID not the same bssid\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid =%pM\n", (bssid))); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("cur_bssid =%pM\n", (pmlmepriv->cur_network.network.MacAddress))); rtw_disassoc_cmd(padapter, 0, true); if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter, 1); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) { _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE); set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); } } } handle_tkip_countermeasure: /* should we add something here...? 
*/ if (padapter->securitypriv.btkip_countermeasure) { cur_time = rtw_get_current_time(); if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) { padapter->securitypriv.btkip_countermeasure = false; padapter->securitypriv.btkip_countermeasure_time = 0; } else { status = _FAIL; goto release_mlme_lock; } } memcpy(&pmlmepriv->assoc_bssid, bssid, ETH_ALEN); pmlmepriv->assoc_by_bssid = true; if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) pmlmepriv->to_join = true; else status = rtw_do_join(padapter); release_mlme_lock: _exit_critical_bh(&pmlmepriv->lock, &irqL); exit: RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid: status=%d\n", status)); _func_exit_; return status; } u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid) { unsigned long irqL; u8 status = _SUCCESS; u32 cur_time = 0; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *pnetwork = &pmlmepriv->cur_network; _func_enter_; DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n", ssid->Ssid, get_fwstate(pmlmepriv)); if (!padapter->hw_init_completed) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("set_ssid: hw_init_completed == false =>exit!!!\n")); status = _FAIL; goto exit; } _enter_critical_bh(&pmlmepriv->lock, &irqL); DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv)); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { goto handle_tkip_countermeasure; } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) { goto release_mlme_lock; } if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n")); if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) && (_rtw_memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) { if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("Set SSID is the same 
ssid, fw_state = 0x%08x\n", get_fwstate(pmlmepriv))); if (!rtw_is_same_ibss(padapter, pnetwork)) { /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */ rtw_disassoc_cmd(padapter, 0, true); if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter, 1); if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) { _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE); set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); } } else { goto release_mlme_lock;/* it means driver is in WIFI_ADHOC_MASTER_STATE, we needn't create bss again. */ } } else { rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_JOINBSS, 1); } } else { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set SSID not the same ssid\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->Ssid, (unsigned int)ssid->SsidLength)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.Ssid, (unsigned int)pmlmepriv->assoc_ssid.SsidLength)); rtw_disassoc_cmd(padapter, 0, true); if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter, 1); if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) { _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE); set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); } } } handle_tkip_countermeasure: if (padapter->securitypriv.btkip_countermeasure) { cur_time = rtw_get_current_time(); if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) { padapter->securitypriv.btkip_countermeasure = false; padapter->securitypriv.btkip_countermeasure_time = 0; } else { status = _FAIL; goto release_mlme_lock; } } memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid)); pmlmepriv->assoc_by_bssid = false; if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { pmlmepriv->to_join = true; } else { status = rtw_do_join(padapter); } release_mlme_lock: 
_exit_critical_bh(&pmlmepriv->lock, &irqL); exit: RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("-rtw_set_802_11_ssid: status =%d\n", status)); _func_exit_; return status; } u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter, enum ndis_802_11_network_infra networktype) { unsigned long irqL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_network *cur_network = &pmlmepriv->cur_network; enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode); _func_enter_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_, ("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n", *pold_state, networktype, get_fwstate(pmlmepriv))); if (*pold_state != networktype) { _enter_critical_bh(&pmlmepriv->lock, &irqL); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!")); /* DBG_88E("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */ if (*pold_state == Ndis802_11APMode) { /* change to other mode from Ndis802_11APMode */ cur_network->join_res = -1; #ifdef CONFIG_88EU_AP_MODE stop_ap_mode(padapter); #endif } if ((check_fwstate(pmlmepriv, _FW_LINKED)) || (*pold_state == Ndis802_11IBSS)) rtw_disassoc_cmd(padapter, 0, true); if ((check_fwstate(pmlmepriv, _FW_LINKED)) || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) rtw_free_assoc_resources(padapter, 1); if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) { if (check_fwstate(pmlmepriv, _FW_LINKED) == true) rtw_indicate_disconnect(padapter); /* will clr Linked_state; before this function, we must have chked whether issue dis-assoc_cmd or not */ } *pold_state = networktype; _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE); switch (networktype) { case Ndis802_11IBSS: set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); break; case Ndis802_11Infrastructure: set_fwstate(pmlmepriv, WIFI_STATION_STATE); break; case Ndis802_11APMode: set_fwstate(pmlmepriv, WIFI_AP_STATE); 
#ifdef CONFIG_88EU_AP_MODE start_ap_mode(padapter); #endif break; case Ndis802_11AutoUnknown: case Ndis802_11InfrastructureMax: break; } _exit_critical_bh(&pmlmepriv->lock, &irqL); } _func_exit_; return true; } u8 rtw_set_802_11_disassociate(struct adapter *padapter) { unsigned long irqL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _func_enter_; _enter_critical_bh(&pmlmepriv->lock, &irqL); if (check_fwstate(pmlmepriv, _FW_LINKED)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_disassociate: rtw_indicate_disconnect\n")); rtw_disassoc_cmd(padapter, 0, true); rtw_indicate_disconnect(padapter); rtw_free_assoc_resources(padapter, 1); rtw_pwr_wakeup(padapter); } _exit_critical_bh(&pmlmepriv->lock, &irqL); _func_exit_; return true; } u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num) { unsigned long irqL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; u8 res = true; _func_enter_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+rtw_set_802_11_bssid_list_scan(), fw_state =%x\n", get_fwstate(pmlmepriv))); if (padapter == NULL) { res = false; goto exit; } if (!padapter->hw_init_completed) { res = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n === rtw_set_802_11_bssid_list_scan:hw_init_completed == false ===\n")); goto exit; } if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) || (pmlmepriv->LinkDetectInfo.bBusyTraffic)) { /* Scan or linking is in progress, do nothing. 
*/ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv))); res = true; if (check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n")); } else { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy == true\n\n")); } } else { if (rtw_is_scan_deny(padapter)) { DBG_88E(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter)); indicate_wx_scan_complete_event(padapter); return _SUCCESS; } _enter_critical_bh(&pmlmepriv->lock, &irqL); res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0); _exit_critical_bh(&pmlmepriv->lock, &irqL); } exit: _func_exit_; return res; } u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11_auth_mode authmode) { struct security_priv *psecuritypriv = &padapter->securitypriv; int res; u8 ret; _func_enter_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode)); psecuritypriv->ndisauthtype = authmode; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_authentication_mode:psecuritypriv->ndisauthtype=%d", psecuritypriv->ndisauthtype)); if (psecuritypriv->ndisauthtype > 3) psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; res = rtw_set_auth(padapter, psecuritypriv); if (res == _SUCCESS) ret = true; else ret = false; _func_exit_; return ret; } u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep) { int keyid, res; struct security_priv *psecuritypriv = &(padapter->securitypriv); u8 ret = _SUCCESS; _func_enter_; keyid = wep->KeyIndex & 0x3fffffff; if (keyid >= 4) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MgntActrtw_set_802_11_add_wep:keyid>4 =>fail\n")); ret = false; goto exit; } switch (wep->KeyLength) { case 5: psecuritypriv->dot11PrivacyAlgrthm = _WEP40_; 
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 5\n")); break; case 13: psecuritypriv->dot11PrivacyAlgrthm = _WEP104_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 13\n")); break; default: psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength!= 5 or 13\n")); break; } RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_add_wep:befor memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n", wep->KeyLength, wep->KeyIndex, keyid)); memcpy(&(psecuritypriv->dot11DefKey[keyid].skey[0]), &(wep->KeyMaterial), wep->KeyLength); psecuritypriv->dot11DefKeylen[keyid] = wep->KeyLength; psecuritypriv->dot11PrivacyKeyIndex = keyid; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_add_wep:security key material : %x %x %x %x %x %x %x %x %x %x %x %x %x\n", psecuritypriv->dot11DefKey[keyid].skey[0], psecuritypriv->dot11DefKey[keyid].skey[1], psecuritypriv->dot11DefKey[keyid].skey[2], psecuritypriv->dot11DefKey[keyid].skey[3], psecuritypriv->dot11DefKey[keyid].skey[4], psecuritypriv->dot11DefKey[keyid].skey[5], psecuritypriv->dot11DefKey[keyid].skey[6], psecuritypriv->dot11DefKey[keyid].skey[7], psecuritypriv->dot11DefKey[keyid].skey[8], psecuritypriv->dot11DefKey[keyid].skey[9], psecuritypriv->dot11DefKey[keyid].skey[10], psecuritypriv->dot11DefKey[keyid].skey[11], psecuritypriv->dot11DefKey[keyid].skey[12])); res = rtw_set_key(padapter, psecuritypriv, keyid, 1); if (res == _FAIL) ret = false; exit: _func_exit_; return ret; } u8 rtw_set_802_11_remove_wep(struct adapter *padapter, u32 keyindex) { u8 ret = _SUCCESS; _func_enter_; if (keyindex >= 0x80000000 || padapter == NULL) { ret = false; goto exit; } else { int res; struct security_priv *psecuritypriv = &(padapter->securitypriv); if (keyindex < 4) { 
_rtw_memset(&psecuritypriv->dot11DefKey[keyindex], 0, 16); res = rtw_set_key(padapter, psecuritypriv, keyindex, 0); psecuritypriv->dot11DefKeylen[keyindex] = 0; if (res == _FAIL) ret = _FAIL; } else { ret = _FAIL; } } exit: _func_exit_; return ret; } u8 rtw_set_802_11_add_key(struct adapter *padapter, struct ndis_802_11_key *key) { uint encryptionalgo; u8 *pbssid; struct sta_info *stainfo; u8 bgroup = false; u8 bgrouptkey = false;/* can be removed later */ u8 ret = _SUCCESS; _func_enter_; if (((key->KeyIndex & 0x80000000) == 0) && ((key->KeyIndex & 0x40000000) > 0)) { /* It is invalid to clear bit 31 and set bit 30. If the miniport driver encounters this combination, */ /* it must fail the request and return NDIS_STATUS_INVALID_DATA. */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_add_key: ((key->KeyIndex & 0x80000000)==0)[=%d]", (int)(key->KeyIndex & 0x80000000) == 0)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_add_key:((key->KeyIndex & 0x40000000)>0)[=%d]", (int)(key->KeyIndex & 0x40000000) > 0)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_set_802_11_add_key: key->KeyIndex=%d\n", (int)key->KeyIndex)); ret = _FAIL; goto exit; } if (key->KeyIndex & 0x40000000) { /* Pairwise key */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ Pairwise key +++++\n")); pbssid = get_bssid(&padapter->mlmepriv); stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid); if ((stainfo != NULL) && (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY:(stainfo!=NULL)&&(Adapter->securitypriv.dot11AuthAlgrthm==dot11AuthAlgrthm_8021X)\n")); encryptionalgo = stainfo->dot118021XPrivacy; } else { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: stainfo == NULL)||(Adapter->securitypriv.dot11AuthAlgrthm!= dot11AuthAlgrthm_8021X)\n")); encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm; } 
RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_add_key: (encryptionalgo==%d)!\n", encryptionalgo)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_add_key: (Adapter->securitypriv.dot11PrivacyAlgrthm==%d)!\n", padapter->securitypriv.dot11PrivacyAlgrthm)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_add_key: (Adapter->securitypriv.dot11AuthAlgrthm==%d)!\n", padapter->securitypriv.dot11AuthAlgrthm)); if ((stainfo != NULL)) RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_add_key: (stainfo->dot118021XPrivacy==%d)!\n", stainfo->dot118021XPrivacy)); if (key->KeyIndex & 0x000000FF) { /* The key index is specified in the lower 8 bits by values of zero to 255. */ /* The key index should be set to zero for a Pairwise key, and the driver should fail with */ /* NDIS_STATUS_INVALID_DATA if the lower 8 bits is not zero */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, (" key->KeyIndex & 0x000000FF.\n")); ret = _FAIL; goto exit; } /* check BSSID */ if (IS_MAC_ADDRESS_BROADCAST(key->BSSID) == true) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MacAddr_isBcst(key->BSSID)\n")); ret = false; goto exit; } /* Check key length for TKIP. */ if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("TKIP KeyLength:0x%x != 32\n", key->KeyLength)); ret = _FAIL; goto exit; } /* Check key length for AES. */ if ((encryptionalgo == _AES_) && (key->KeyLength != 16)) { /* For our supplicant, EAPPkt9x.vxd, cannot differentiate TKIP and AES case. */ if (key->KeyLength == 32) { key->KeyLength = 16; } else { ret = _FAIL; goto exit; } } /* Check key length for WEP. For NDTEST, 2005.01.27, by rcnjko. 
*/ if ((encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_) && (key->KeyLength != 5 && key->KeyLength != 13)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("WEP KeyLength:0x%x != 5 or 13\n", key->KeyLength)); ret = _FAIL; goto exit; } bgroup = false; /* Check the pairwise key. Added by Annie, 2005-07-06. */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("[Pairwise Key set]\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key index: 0x%8x(0x%8x)\n", key->KeyIndex, (key->KeyIndex&0x3))); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key Length: %d\n", key->KeyLength)); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n")); } else { /* Group key - KeyIndex(BIT30 == 0) */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ Group key +++++\n")); /* when add wep key through add key and didn't assigned encryption type before */ if ((padapter->securitypriv.ndisauthtype <= 3) && (padapter->securitypriv.dot118021XGrpPrivacy == 0)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("keylen =%d(Adapter->securitypriv.dot11PrivacyAlgrthm=%x )padapter->securitypriv.dot118021XGrpPrivacy(%x)\n", key->KeyLength, padapter->securitypriv.dot11PrivacyAlgrthm, padapter->securitypriv.dot118021XGrpPrivacy)); switch (key->KeyLength) { case 5: padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n", padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength)); break; case 13: padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n", 
padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength)); break; default: padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n", padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength)); break; } encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, (" Adapter->securitypriv.dot11PrivacyAlgrthm=%x\n", padapter->securitypriv.dot11PrivacyAlgrthm)); } else { encryptionalgo = padapter->securitypriv.dot118021XGrpPrivacy; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("(Adapter->securitypriv.dot11PrivacyAlgrthm=%x)encryptionalgo(%x)=padapter->securitypriv.dot118021XGrpPrivacy(%x)keylen=%d\n", padapter->securitypriv.dot11PrivacyAlgrthm, encryptionalgo, padapter->securitypriv.dot118021XGrpPrivacy, key->KeyLength)); } if ((check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE) == true) && (IS_MAC_ADDRESS_BROADCAST(key->BSSID) == false)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, (" IBSS but BSSID is not Broadcast Address.\n")); ret = _FAIL; goto exit; } /* Check key length for TKIP */ if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) { RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, (" TKIP GTK KeyLength:%u != 32\n", key->KeyLength)); ret = _FAIL; goto exit; } else if (encryptionalgo == _AES_ && (key->KeyLength != 16 && key->KeyLength != 32)) { /* Check key length for AES */ /* For NDTEST, we allow keylen = 32 in this case. 2005.01.27, by rcnjko. */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("<=== SetInfo, OID_802_11_ADD_KEY: AES GTK KeyLength:%u != 16 or 32\n", key->KeyLength)); ret = _FAIL; goto exit; } /* Change the key length for EAPPkt9x.vxd. Added by Annie, 2005-11-03. 
*/ if ((encryptionalgo == _AES_) && (key->KeyLength == 32)) { key->KeyLength = 16; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("AES key length changed: %u\n", key->KeyLength)); } if (key->KeyIndex & 0x8000000) {/* error ??? 0x8000_0000 */ bgrouptkey = true; } if ((check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE)) && (check_fwstate(&padapter->mlmepriv, _FW_LINKED))) bgrouptkey = true; bgroup = true; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("[Group Key set]\n")); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n")) ; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key index: 0x%8x(0x%8x)\n", key->KeyIndex, (key->KeyIndex&0x3))); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key Length: %d\n", key->KeyLength)) ; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n")); } /* If WEP encryption algorithm, just call rtw_set_802_11_add_wep(). 
*/ if ((padapter->securitypriv.dot11AuthAlgrthm != dot11AuthAlgrthm_8021X) && (encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_)) { u32 keyindex; u32 len = FIELD_OFFSET(struct ndis_802_11_key, KeyMaterial) + key->KeyLength; struct ndis_802_11_wep *wep = &padapter->securitypriv.ndiswep; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ WEP key +++++\n")); wep->Length = len; keyindex = key->KeyIndex&0x7fffffff; wep->KeyIndex = keyindex ; wep->KeyLength = key->KeyLength; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY:Before memcpy\n")); memcpy(wep->KeyMaterial, key->KeyMaterial, key->KeyLength); memcpy(&(padapter->securitypriv.dot11DefKey[keyindex].skey[0]), key->KeyMaterial, key->KeyLength); padapter->securitypriv.dot11DefKeylen[keyindex] = key->KeyLength; padapter->securitypriv.dot11PrivacyKeyIndex = keyindex; ret = rtw_set_802_11_add_wep(padapter, wep); goto exit; } if (key->KeyIndex & 0x20000000) { /* SetRSC */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ SetRSC+++++\n")); if (bgroup) { unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL; memcpy(&padapter->securitypriv.dot11Grprxpn, &keysrc, 8); } else { unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL; memcpy(&padapter->securitypriv.dot11Grptxpn, &keysrc, 8); } } /* Indicate this key idx is used for TX */ /* Save the key in KeyMaterial */ if (bgroup) { /* Group transmit key */ int res; if (bgrouptkey) padapter->securitypriv.dot118021XGrpKeyid = (u8)key->KeyIndex; if ((key->KeyIndex&0x3) == 0) { ret = _FAIL; goto exit; } _rtw_memset(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], 0, 16); _rtw_memset(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16); _rtw_memset(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16); if ((key->KeyIndex & 0x10000000)) { 
memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8); memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:rx mic :0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[0], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[1], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[2], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[3], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[4], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[5], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[6], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[7])); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:set Group mic key!!!!!!!!\n")); } else { memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8); memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:rx mic :0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[0], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[1], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[2], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[3], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[4], 
padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[5], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[6], padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[7])); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:set Group mic key!!!!!!!!\n")); } /* set group key by index */ memcpy(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial, key->KeyLength); key->KeyIndex = key->KeyIndex & 0x03; padapter->securitypriv.binstallGrpkey = true; padapter->securitypriv.bcheck_grpkey = false; RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("reset group key")); res = rtw_set_key(padapter, &padapter->securitypriv, key->KeyIndex, 1); if (res == _FAIL) ret = _FAIL; goto exit; } else { /* Pairwise Key */ u8 res; pbssid = get_bssid(&padapter->mlmepriv); stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid); if (stainfo != NULL) { _rtw_memset(&stainfo->dot118021x_UncstKey, 0, 16);/* clear keybuffer */ memcpy(&stainfo->dot118021x_UncstKey, key->KeyMaterial, 16); if (encryptionalgo == _TKIP_) { padapter->securitypriv.busetkipkey = false; /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n========== _set_timer\n")); /* if TKIP, save the Receive/Transmit MIC key in KeyMaterial[128-255] */ if ((key->KeyIndex & 0x10000000)) { memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 16, 8); memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 24, 8); } else { memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 24, 8); memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 16, 8); } } /* Set key to CAM through H2C command */ if (bgrouptkey) { /* never go to here */ res = rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, false); RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:rtw_setstakey_cmd(group)\n")); } else { res = 
rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, true);
				RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
					 ("\n rtw_set_802_11_add_key:rtw_setstakey_cmd(unicast)\n"));
			}
			if (!res)
				ret = _FAIL;
		}
	}
exit:
	_func_exit_;
	return ret;
}

/* Remove a WEP/WPA key previously installed via OID_802_11_ADD_KEY.
 * Only clears the driver-side key material; pushing the removal down
 * to the firmware CAM is still a \todo (see below). */
u8 rtw_set_802_11_remove_key(struct adapter *padapter, struct ndis_802_11_remove_key *key)
{
	u8 *pbssid;
	struct sta_info *stainfo;
	/* NOTE(review): NDIS defines bit30 (0x40000000) as the pairwise-key
	 * flag; this mask (0x4000000 = bit26) looks like a missing zero, the
	 * same way add_key's 0x8000000 carries an "error ???" comment -
	 * confirm against the NDIS_802_11_REMOVE_KEY documentation. */
	u8 bgroup = (key->KeyIndex & 0x4000000) > 0 ? false : true;
	u8 keyIndex = (u8)key->KeyIndex & 0x03;
	u8 ret = _SUCCESS;

	_func_enter_;
	/* Reject requests with any reserved/unsupported KeyIndex bits set. */
	if ((key->KeyIndex & 0xbffffffc) > 0) {
		ret = _FAIL;
		goto exit;
	}
	if (bgroup) {
		/* clear group key by index */
		_rtw_memset(&padapter->securitypriv.dot118021XGrpKey[keyIndex], 0, 16);
		/* \todo Send a H2C Command to Firmware for removing this Key in CAM Entry. */
	} else {
		pbssid = get_bssid(&padapter->mlmepriv);
		stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
		if (stainfo) {
			/* clear key by BSSID */
			_rtw_memset(&stainfo->dot118021x_UncstKey, 0, 16);
			/* \todo Send a H2C Command to Firmware for disable this Key in CAM Entry.
*/
		} else {
			/* No station entry for the current BSSID: nothing to remove. */
			ret = _FAIL;
			goto exit;
		}
	}
exit:
	_func_exit_;
	return ret;
}

/*
 * rtw_get_cur_max_rate -
 * @adapter: pointer to struct adapter structure
 *
 * Return 0 or 100Kbps
 */
u16 rtw_get_cur_max_rate(struct adapter *adapter)
{
	int i = 0;
	u8 *p;
	u16 rate = 0, max_rate = 0;
	struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct registry_priv *pregistrypriv = &adapter->registrypriv;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
	struct rtw_ieee80211_ht_cap *pht_capie;
	u8 rf_type = 0;
	u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
	u16 mcs_rate = 0;
	u32 ht_ielen = 0;

	/* Manufacturing-test (MP) mode: no meaningful link rate to report. */
	if (adapter->registrypriv.mp_mode == 1) {
		if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
			return 0;
	}

	/* No rate unless associated or acting as ad-hoc master. */
	if ((!check_fwstate(pmlmepriv, _FW_LINKED)) &&
	    (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
		return 0;

	if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N|WIRELESS_11_5N)) {
		/* HT network: derive the max rate from the HT capability IE
		 * (MCS set, channel width, short guard interval). */
		p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
		if (p && ht_ielen > 0) {
			pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);

			memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);

			/* cur_bwmod is updated by beacon, pmlmeinfo is updated by association response */
			bw_40MHz = (pmlmeext->cur_bwmode &&
				    (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH &
				     pmlmeinfo->HT_info.infos[0])) ? 1 : 0;

			short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) &
				       IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
			short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) &
				       IEEE80211_HT_CAP_SGI_40) ?
1 : 0;

			/* Query the RF chain configuration (1T1R/2T2R/...) so the
			 * MCS-to-rate mapping can take antenna count into account. */
			rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
			max_rate = rtw_mcs_rate(
				rf_type,
				bw_40MHz & (pregistrypriv->cbw40_enable), /* 40MHz only if enabled in registry */
				short_GI_20,
				short_GI_40,
				pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate
			);
		}
	} else {
		/* Legacy (non-HT) network: scan the supported-rates list for
		 * the highest rate.  Entries are in 500kbps units with bit7
		 * marking "basic rate", hence the &0x7F mask. */
		while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) {
			rate = pcur_bss->SupportedRates[i]&0x7F;
			if (rate > max_rate)
				max_rate = rate;
			i++;
		}
		/* Convert 500kbps units to the 100kbps units this function returns. */
		max_rate = max_rate*10/2;
	}
	return max_rate;
}

/*
 * rtw_set_scan_mode -
 * @adapter: pointer to struct adapter structure
 * @scan_mode: SCAN_ACTIVE or SCAN_PASSIVE
 *
 * Return _SUCCESS or _FAIL
 */
int rtw_set_scan_mode(struct adapter *adapter, enum rt_scan_type scan_mode)
{
	/* Only the two defined probe strategies are accepted. */
	if (scan_mode != SCAN_ACTIVE && scan_mode != SCAN_PASSIVE)
		return _FAIL;
	adapter->mlmepriv.scan_mode = scan_mode;
	return _SUCCESS;
}

/*
 * rtw_set_channel_plan -
 * @adapter: pointer to struct adapter structure
 * @channel_plan: one of the RT_CHANNEL_DOMAIN_* regulatory domains
 *
 * Return _SUCCESS or _FAIL
 */
int rtw_set_channel_plan(struct adapter *adapter, u8 channel_plan)
{
	/* handle by cmd_thread to sync with scan operation */
	return rtw_set_chplan_cmd(adapter, channel_plan, 1);
}

/*
 * rtw_set_country -
 * @adapter: pointer to struct adapter structure
 * @country_code: string of country code
 *
 * Return _SUCCESS or _FAIL
 */
int rtw_set_country(struct adapter *adapter, const char *country_code)
{
	/* Unknown codes fall back to the permissive world-wide 5G plan. */
	int channel_plan = RT_CHANNEL_DOMAIN_WORLD_WIDE_5G;

	DBG_88E("%s country_code:%s\n", __func__, country_code);
	/* TODO: should have a table to match country code and RT_CHANNEL_DOMAIN */
	/* TODO: should consider 2-character and 3-character country code */
	if (0 == strcmp(country_code, "US"))
		channel_plan = RT_CHANNEL_DOMAIN_FCC;
	else if (0 == strcmp(country_code, "EU"))
		channel_plan = RT_CHANNEL_DOMAIN_ETSI;
	else if (0 == strcmp(country_code, "JP"))
		channel_plan = RT_CHANNEL_DOMAIN_MKK;
	else if (0 == strcmp(country_code, "CN"))
		channel_plan = RT_CHANNEL_DOMAIN_CHINA;
	else
		DBG_88E("%s unknown country_code:%s\n", __func__, country_code);

	return rtw_set_channel_plan(adapter, channel_plan);
}
gpl-2.0
raulherbster/goldfish
kernel/cred.c
168
14886
/* Task credentials management - see Documentation/credentials.txt
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/init_task.h>
#include <linux/security.h>
#include <linux/cn_proc.h>
#include "cred-internals.h"

/* Slab cache from which every struct cred is allocated. */
static struct kmem_cache *cred_jar;

/*
 * The common credentials for the initial task's thread group
 */
#ifdef CONFIG_KEYS
static struct thread_group_cred init_tgcred = {
	/* NOTE(review): initial count of 2 presumably covers init_cred's ref
	 * plus a permanent pin on this static object - confirm. */
	.usage	= ATOMIC_INIT(2),
	.tgid	= 0,
	.lock	= SPIN_LOCK_UNLOCKED,
};
#endif

/*
 * The initial credentials for the initial task
 */
struct cred init_cred = {
	.usage			= ATOMIC_INIT(4),
	.securebits		= SECUREBITS_DEFAULT,
	.cap_inheritable	= CAP_INIT_INH_SET,
	.cap_permitted		= CAP_FULL_SET,
	.cap_effective		= CAP_INIT_EFF_SET,
	.cap_bset		= CAP_INIT_BSET,
	.user			= INIT_USER,
	.group_info		= &init_groups,
#ifdef CONFIG_KEYS
	.tgcred			= &init_tgcred,
#endif
};

/*
 * Dispose of the shared task group credentials
 */
#ifdef CONFIG_KEYS
static void release_tgcred_rcu(struct rcu_head *rcu)
{
	struct thread_group_cred *tgcred =
		container_of(rcu, struct thread_group_cred, rcu);

	/* By the time the RCU callback runs, the last reference must be gone. */
	BUG_ON(atomic_read(&tgcred->usage) != 0);

	key_put(tgcred->session_keyring);
	key_put(tgcred->process_keyring);
	kfree(tgcred);
}
#endif

/*
 * Release a set of thread group credentials.
*/
static void release_tgcred(struct cred *cred)
{
#ifdef CONFIG_KEYS
	struct thread_group_cred *tgcred = cred->tgcred;

	/* Defer the actual free to RCU so lock-free readers of the group
	 * keyrings are not pulled out from under. */
	if (atomic_dec_and_test(&tgcred->usage))
		call_rcu(&tgcred->rcu, release_tgcred_rcu);
#endif
}

/*
 * The RCU callback to actually dispose of a set of credentials
 */
static void put_cred_rcu(struct rcu_head *rcu)
{
	struct cred *cred = container_of(rcu, struct cred, rcu);

	/* A non-zero usage here means someone re-got the cred after the
	 * final put - a refcounting bug, so die loudly. */
	if (atomic_read(&cred->usage) != 0)
		panic("CRED: put_cred_rcu() sees %p with usage %d\n",
		      cred, atomic_read(&cred->usage));

	security_cred_free(cred);
	key_put(cred->thread_keyring);
	key_put(cred->request_key_auth);
	release_tgcred(cred);
	put_group_info(cred->group_info);
	free_uid(cred->user);
	kmem_cache_free(cred_jar, cred);
}

/**
 * __put_cred - Destroy a set of credentials
 * @cred: The record to release
 *
 * Destroy a set of credentials on which no references remain.
 */
void __put_cred(struct cred *cred)
{
	BUG_ON(atomic_read(&cred->usage) != 0);

	call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);

/**
 * prepare_creds - Prepare a new set of credentials for modification
 *
 * Prepare a new set of task credentials for modification.  A task's creds
 * shouldn't generally be modified directly, therefore this function is used to
 * prepare a new copy, which the caller then modifies and then commits by
 * calling commit_creds().
 *
 * Preparation involves making a copy of the objective creds for modification.
 *
 * Returns a pointer to the new creds-to-be if successful, NULL otherwise.
 *
 * Call commit_creds() or abort_creds() to clean up.
*/
struct cred *prepare_creds(void)
{
	struct task_struct *task = current;
	const struct cred *old;
	struct cred *new;

	BUG_ON(atomic_read(&task->real_cred->usage) < 1);

	new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
	if (!new)
		return NULL;

	/* Copy the subjective creds wholesale, then take our own references
	 * on everything the struct points at. */
	old = task->cred;
	memcpy(new, old, sizeof(struct cred));

	atomic_set(&new->usage, 1);
	get_group_info(new->group_info);
	get_uid(new->user);

#ifdef CONFIG_KEYS
	key_get(new->thread_keyring);
	key_get(new->request_key_auth);
	atomic_inc(&new->tgcred->usage);
#endif

#ifdef CONFIG_SECURITY
	new->security = NULL;
#endif

	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
		goto error;
	return new;

error:
	/* abort_creds() drops all the refs taken above. */
	abort_creds(new);
	return NULL;
}
EXPORT_SYMBOL(prepare_creds);

/*
 * Prepare credentials for current to perform an execve()
 * - The caller must hold current->cred_exec_mutex
 */
struct cred *prepare_exec_creds(void)
{
	struct thread_group_cred *tgcred = NULL;
	struct cred *new;

#ifdef CONFIG_KEYS
	/* Allocate up front so failure is handled before creds are copied. */
	tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
	if (!tgcred)
		return NULL;
#endif

	new = prepare_creds();
	if (!new) {
		kfree(tgcred);
		return new;
	}

#ifdef CONFIG_KEYS
	/* newly exec'd tasks don't get a thread keyring */
	key_put(new->thread_keyring);
	new->thread_keyring = NULL;

	/* create a new per-thread-group creds for all this set of threads to
	 * share */
	memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred));

	atomic_set(&tgcred->usage, 1);
	spin_lock_init(&tgcred->lock);

	/* inherit the session keyring; new process keyring */
	key_get(tgcred->session_keyring);
	tgcred->process_keyring = NULL;

	release_tgcred(new);
	new->tgcred = tgcred;
#endif

	return new;
}

/*
 * prepare new credentials for the usermode helper dispatcher
 */
struct cred *prepare_usermodehelper_creds(void)
{
#ifdef CONFIG_KEYS
	struct thread_group_cred *tgcred = NULL;
#endif
	struct cred *new;

#ifdef CONFIG_KEYS
	tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC);
	if (!tgcred)
		return NULL;
#endif

	/* NOTE(review): tgcred is leaked if this allocation fails - verify
	 * against upstream before relying on this path. */
	new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, &init_cred,
sizeof(struct cred));

	atomic_set(&new->usage, 1);
	get_group_info(new->group_info);
	get_uid(new->user);

#ifdef CONFIG_KEYS
	/* Helpers start with no thread/request keys; the fresh tgcred gives
	 * them an empty process/session keyring set. */
	new->thread_keyring = NULL;
	new->request_key_auth = NULL;
	new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT;

	atomic_set(&tgcred->usage, 1);
	spin_lock_init(&tgcred->lock);
	new->tgcred = tgcred;
#endif

#ifdef CONFIG_SECURITY
	new->security = NULL;
#endif
	if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
		goto error;

	BUG_ON(atomic_read(&new->usage) != 1);
	return new;

error:
	put_cred(new);
	return NULL;
}

/*
 * Copy credentials for the new process created by fork()
 *
 * We share if we can, but under some circumstances we have to generate a new
 * set.
 *
 * The new process gets the current process's subjective credentials as its
 * objective and subjective credentials
 */
int copy_creds(struct task_struct *p, unsigned long clone_flags)
{
#ifdef CONFIG_KEYS
	struct thread_group_cred *tgcred;
#endif
	struct cred *new;
	int ret;

	mutex_init(&p->cred_exec_mutex);

	/* A new thread with no private thread keyring can simply share the
	 * parent's creds: one ref each for objective and subjective use. */
	if (
#ifdef CONFIG_KEYS
		!p->cred->thread_keyring &&
#endif
		clone_flags & CLONE_THREAD
	    ) {
		p->real_cred = get_cred(p->cred);
		get_cred(p->cred);
		atomic_inc(&p->cred->user->processes);
		return 0;
	}

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	if (clone_flags & CLONE_NEWUSER) {
		ret = create_user_ns(new);
		if (ret < 0)
			goto error_put;
	}

#ifdef CONFIG_KEYS
	/* new threads get their own thread keyrings if their parent already
	 * had one */
	if (new->thread_keyring) {
		key_put(new->thread_keyring);
		new->thread_keyring = NULL;
		if (clone_flags & CLONE_THREAD)
			install_thread_keyring_to_cred(new);
	}

	/* we share the process and session keyrings between all the threads in
	 * a process - this is slightly icky as we violate COW credentials a
	 * bit */
	if (!(clone_flags & CLONE_THREAD)) {
		tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
		if (!tgcred) {
			ret = -ENOMEM;
			goto error_put;
		}
		atomic_set(&tgcred->usage, 1);
		spin_lock_init(&tgcred->lock);
		tgcred->process_keyring = NULL;
		tgcred->session_keyring =
key_get(new->tgcred->session_keyring);

		release_tgcred(new);
		new->tgcred = tgcred;
	}
#endif

	atomic_inc(&new->user->processes);
	p->cred = p->real_cred = get_cred(new);
	return 0;

error_put:
	put_cred(new);
	return ret;
}

/**
 * commit_creds - Install new credentials upon the current task
 * @new: The credentials to be assigned
 *
 * Install a new set of credentials to the current task, using RCU to replace
 * the old set.  Both the objective and the subjective credentials pointers are
 * updated.  This function may not be called if the subjective credentials are
 * in an overridden state.
 *
 * This function eats the caller's reference to the new credentials.
 *
 * Always returns 0 thus allowing this function to be tail-called at the end
 * of, say, sys_setgid().
 */
int commit_creds(struct cred *new)
{
	struct task_struct *task = current;
	const struct cred *old;

	BUG_ON(task->cred != task->real_cred);
	BUG_ON(atomic_read(&task->real_cred->usage) < 2);
	BUG_ON(atomic_read(&new->usage) < 1);

	old = task->real_cred;
	security_commit_creds(new, old);

	get_cred(new); /* we will require a ref for the subj creds too */

	/* dumpability changes */
	if (old->euid != new->euid ||
	    old->egid != new->egid ||
	    old->fsuid != new->fsuid ||
	    old->fsgid != new->fsgid ||
	    !cap_issubset(new->cap_permitted, old->cap_permitted)) {
		if (task->mm)
			set_dumpable(task->mm, suid_dumpable);
		task->pdeath_signal = 0;
		smp_wmb();
	}

	/* alter the thread keyring */
	if (new->fsuid != old->fsuid)
		key_fsuid_changed(task);
	if (new->fsgid != old->fsgid)
		key_fsgid_changed(task);

	/* do it
	 * - What if a process setreuid()'s and this brings the
	 *   new uid over his NPROC rlimit?  We can check this now
	 *   cheaply with the new uid cache, so if it matters
	 *   we should be checking for it.
-DaveM */
	/* Adjust per-user process accounting for the uid change before
	 * publishing the new creds. */
	if (new->user != old->user)
		atomic_inc(&new->user->processes);
	rcu_assign_pointer(task->real_cred, new);
	rcu_assign_pointer(task->cred, new);
	if (new->user != old->user)
		atomic_dec(&old->user->processes);
	sched_switch_user(task);

	/* send notifications */
	if (new->uid   != old->uid  ||
	    new->euid  != old->euid ||
	    new->suid  != old->suid ||
	    new->fsuid != old->fsuid)
		proc_id_connector(task, PROC_EVENT_UID);

	if (new->gid   != old->gid  ||
	    new->egid  != old->egid ||
	    new->sgid  != old->sgid ||
	    new->fsgid != old->fsgid)
		proc_id_connector(task, PROC_EVENT_GID);

	/* release the old obj and subj refs both */
	put_cred(old);
	put_cred(old);
	return 0;
}
EXPORT_SYMBOL(commit_creds);

/**
 * abort_creds - Discard a set of credentials and unlock the current task
 * @new: The credentials that were going to be applied
 *
 * Discard a set of credentials that were under construction and unlock the
 * current task.
 */
void abort_creds(struct cred *new)
{
	BUG_ON(atomic_read(&new->usage) < 1);
	put_cred(new);
}
EXPORT_SYMBOL(abort_creds);

/**
 * override_creds - Override the current process's subjective credentials
 * @new: The credentials to be assigned
 *
 * Install a set of temporary override subjective credentials on the current
 * process, returning the old set for later reversion.
 */
const struct cred *override_creds(const struct cred *new)
{
	const struct cred *old = current->cred;

	/* Take a ref on @new; the returned old set is handed back to
	 * revert_creds() later. */
	rcu_assign_pointer(current->cred, get_cred(new));
	return old;
}
EXPORT_SYMBOL(override_creds);

/**
 * revert_creds - Revert a temporary subjective credentials override
 * @old: The credentials to be restored
 *
 * Revert a temporary set of override subjective credentials to an old set,
 * discarding the override set.
*/
void revert_creds(const struct cred *old)
{
	const struct cred *override = current->cred;

	/* Swap the subjective creds back and drop the reference that
	 * override_creds() took on the temporary set. */
	rcu_assign_pointer(current->cred, old);
	put_cred(override);
}
EXPORT_SYMBOL(revert_creds);

/*
 * initialise the credentials stuff
 */
void __init cred_init(void)
{
	/* allocate a slab in which we can store credentials */
	cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred), 0,
				     SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}

/**
 * prepare_kernel_cred - Prepare a set of credentials for a kernel service
 * @daemon: A userspace daemon to be used as a reference
 *
 * Prepare a set of credentials for a kernel service.  This can then be used to
 * override a task's own credentials so that work can be done on behalf of that
 * task that requires a different subjective context.
 *
 * @daemon is used to provide a base for the security record, but can be NULL.
 * If @daemon is supplied, then the security data will be derived from that;
 * otherwise they'll be set to 0 and no groups, full capabilities and no keys.
 *
 * The caller may change these controls afterwards if desired.
 *
 * Returns the new credentials or NULL if out of memory.
 *
 * Does not take, and does not return holding current->cred_replace_mutex.
*/
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
	const struct cred *old;
	struct cred *new;

	new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
	if (!new)
		return NULL;

	/* Base the new creds on the daemon's creds if one was given,
	 * otherwise on the boot-time init credentials. */
	if (daemon)
		old = get_task_cred(daemon);
	else
		old = get_cred(&init_cred);

	*new = *old;
	get_uid(new->user);
	get_group_info(new->group_info);

#ifdef CONFIG_KEYS
	/* Kernel services share the init thread-group keyrings and carry no
	 * thread or request_key authorisation keys of their own. */
	atomic_inc(&init_tgcred.usage);
	new->tgcred = &init_tgcred;
	new->request_key_auth = NULL;
	new->thread_keyring = NULL;
	new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
#endif

#ifdef CONFIG_SECURITY
	new->security = NULL;
#endif
	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
		goto error;

	atomic_set(&new->usage, 1);
	put_cred(old);
	return new;

error:
	put_cred(new);
	put_cred(old);
	return NULL;
}
EXPORT_SYMBOL(prepare_kernel_cred);

/**
 * set_security_override - Set the security ID in a set of credentials
 * @new: The credentials to alter
 * @secid: The LSM security ID to set
 *
 * Set the LSM security ID in a set of credentials so that the subjective
 * security is overridden when an alternative set of credentials is used.
 */
int set_security_override(struct cred *new, u32 secid)
{
	return security_kernel_act_as(new, secid);
}
EXPORT_SYMBOL(set_security_override);

/**
 * set_security_override_from_ctx - Set the security ID in a set of credentials
 * @new: The credentials to alter
 * @secctx: The LSM security context to generate the security ID from.
 *
 * Set the LSM security ID in a set of credentials so that the subjective
 * security is overridden when an alternative set of credentials is used.  The
 * security ID is specified in string form as a security context to be
 * interpreted by the LSM.
*/
int set_security_override_from_ctx(struct cred *new, const char *secctx)
{
	u32 secid;
	int ret;

	/* Ask the LSM to translate the textual context into a secid first. */
	ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
	if (ret < 0)
		return ret;

	return set_security_override(new, secid);
}
EXPORT_SYMBOL(set_security_override_from_ctx);

/**
 * set_create_files_as - Set the LSM file create context in a set of credentials
 * @new: The credentials to alter
 * @inode: The inode to take the context from
 *
 * Change the LSM file creation context in a set of credentials to be the same
 * as the object context of the specified inode, so that the new inodes have
 * the same MAC context as that inode.
 */
int set_create_files_as(struct cred *new, struct inode *inode)
{
	new->fsuid = inode->i_uid;
	new->fsgid = inode->i_gid;
	return security_kernel_create_files_as(new, inode);
}
EXPORT_SYMBOL(set_create_files_as);
gpl-2.0
BobZhome/android_kernel_gelato
drivers/usb/gadget/f_fs.c
680
55543
/* * f_fs.c -- user mode filesystem api for usb composite funtcion controllers * * Copyright (C) 2010 Samsung Electronics * Author: Michal Nazarewicz <m.nazarewicz@samsung.com> * * Based on inode.c (GadgetFS): * Copyright (C) 2003-2004 David Brownell * Copyright (C) 2003 Agilent Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ /* #define VERBOSE_DEBUG */ #include <linux/blkdev.h> #include <linux/pagemap.h> #include <asm/unaligned.h> #include <linux/smp_lock.h> #include <linux/usb/composite.h> #include <linux/usb/functionfs.h> #define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */ /* Debuging *****************************************************************/ #define ffs_printk(level, fmt, args...) printk(level "f_fs: " fmt "\n", ## args) #define FERR(...) ffs_printk(KERN_ERR, __VA_ARGS__) #define FINFO(...) ffs_printk(KERN_INFO, __VA_ARGS__) #ifdef DEBUG # define FDBG(...) ffs_printk(KERN_DEBUG, __VA_ARGS__) #else # define FDBG(...) do { } while (0) #endif /* DEBUG */ #ifdef VERBOSE_DEBUG # define FVDBG FDBG #else # define FVDBG(...) 
do { } while (0) #endif /* VERBOSE_DEBUG */ #define ENTER() FVDBG("%s()", __func__) #ifdef VERBOSE_DEBUG # define ffs_dump_mem(prefix, ptr, len) \ print_hex_dump_bytes("f_fs" prefix ": ", DUMP_PREFIX_NONE, ptr, len) #else # define ffs_dump_mem(prefix, ptr, len) do { } while (0) #endif /* The data structure and setup file ****************************************/ enum ffs_state { /* Waiting for descriptors and strings. */ /* In this state no open(2), read(2) or write(2) on epfiles * may succeed (which should not be the problem as there * should be no such files opened in the firts place). */ FFS_READ_DESCRIPTORS, FFS_READ_STRINGS, /* We've got descriptors and strings. We are or have called * functionfs_ready_callback(). functionfs_bind() may have * been called but we don't know. */ /* This is the only state in which operations on epfiles may * succeed. */ FFS_ACTIVE, /* All endpoints have been closed. This state is also set if * we encounter an unrecoverable error. The only * unrecoverable error is situation when after reading strings * from user space we fail to initialise EP files or * functionfs_ready_callback() returns with error (<0). */ /* In this state no open(2), read(2) or write(2) (both on ep0 * as well as epfile) may succeed (at this point epfiles are * unlinked and all closed so this is not a problem; ep0 is * also closed but ep0 file exists and so open(2) on ep0 must * fail). */ FFS_CLOSING }; enum ffs_setup_state { /* There is no setup request pending. */ FFS_NO_SETUP, /* User has read events and there was a setup request event * there. The next read/write on ep0 will handle the * request. */ FFS_SETUP_PENDING, /* There was event pending but before user space handled it * some other event was introduced which canceled existing * setup. If this state is set read/write on ep0 return * -EIDRM. This state is only set when adding event. 
*/ FFS_SETUP_CANCELED }; struct ffs_epfile; struct ffs_function; struct ffs_data { struct usb_gadget *gadget; /* Protect access read/write operations, only one read/write * at a time. As a consequence protects ep0req and company. * While setup request is being processed (queued) this is * held. */ struct mutex mutex; /* Protect access to enpoint related structures (basically * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for * endpint zero. */ spinlock_t eps_lock; /* XXX REVISIT do we need our own request? Since we are not * handling setup requests immidiatelly user space may be so * slow that another setup will be sent to the gadget but this * time not to us but another function and then there could be * a race. Is that the case? Or maybe we can use cdev->req * after all, maybe we just need some spinlock for that? */ struct usb_request *ep0req; /* P: mutex */ struct completion ep0req_completion; /* P: mutex */ int ep0req_status; /* P: mutex */ /* reference counter */ atomic_t ref; /* how many files are opened (EP0 and others) */ atomic_t opened; /* EP0 state */ enum ffs_state state; /* * Possible transations: * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock * happens only in ep0 read which is P: mutex * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock * happens only in ep0 i/o which is P: mutex * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELED -- P: ev.waitq.lock * + FFS_SETUP_CANCELED -> FFS_NO_SETUP -- cmpxchg */ enum ffs_setup_state setup_state; #define FFS_SETUP_STATE(ffs) \ ((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state, \ FFS_SETUP_CANCELED, FFS_NO_SETUP)) /* Events & such. */ struct { u8 types[4]; unsigned short count; /* XXX REVISIT need to update it in some places, or do we? 
*/ unsigned short can_stall; struct usb_ctrlrequest setup; wait_queue_head_t waitq; } ev; /* the whole structure, P: ev.waitq.lock */ /* Flags */ unsigned long flags; #define FFS_FL_CALL_CLOSED_CALLBACK 0 #define FFS_FL_BOUND 1 /* Active function */ struct ffs_function *func; /* Device name, write once when file system is mounted. * Intendet for user to read if she wants. */ const char *dev_name; /* Private data for our user (ie. gadget). Managed by * user. */ void *private_data; /* filled by __ffs_data_got_descs() */ /* real descriptors are 16 bytes after raw_descs (so you need * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the * first full speed descriptor). raw_descs_length and * raw_fs_descs_length do not have those 16 bytes added. */ const void *raw_descs; unsigned raw_descs_length; unsigned raw_fs_descs_length; unsigned fs_descs_count; unsigned hs_descs_count; unsigned short strings_count; unsigned short interfaces_count; unsigned short eps_count; unsigned short _pad1; /* filled by __ffs_data_got_strings() */ /* ids in stringtabs are set in functionfs_bind() */ const void *raw_strings; struct usb_gadget_strings **stringtabs; /* File system's super block, write once when file system is mounted. */ struct super_block *sb; /* File permissions, written once when fs is mounted*/ struct ffs_file_perms { umode_t mode; uid_t uid; gid_t gid; } file_perms; /* The endpoint files, filled by ffs_epfiles_create(), * destroyed by ffs_epfiles_destroy(). */ struct ffs_epfile *epfiles; }; /* Reference counter handling */ static void ffs_data_get(struct ffs_data *ffs); static void ffs_data_put(struct ffs_data *ffs); /* Creates new ffs_data object. */ static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc)); /* Opened counter handling. */ static void ffs_data_opened(struct ffs_data *ffs); static void ffs_data_closed(struct ffs_data *ffs); /* Called with ffs->mutex held; take over ownerrship of data. 
*/ static int __must_check __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len); static int __must_check __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len); /* The function structure ***************************************************/ struct ffs_ep; struct ffs_function { struct usb_configuration *conf; struct usb_gadget *gadget; struct ffs_data *ffs; struct ffs_ep *eps; u8 eps_revmap[16]; short *interfaces_nums; struct usb_function function; }; static struct ffs_function *ffs_func_from_usb(struct usb_function *f) { return container_of(f, struct ffs_function, function); } static void ffs_func_free(struct ffs_function *func); static void ffs_func_eps_disable(struct ffs_function *func); static int __must_check ffs_func_eps_enable(struct ffs_function *func); static int ffs_func_bind(struct usb_configuration *, struct usb_function *); static void ffs_func_unbind(struct usb_configuration *, struct usb_function *); static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned); static void ffs_func_disable(struct usb_function *); static int ffs_func_setup(struct usb_function *, const struct usb_ctrlrequest *); static void ffs_func_suspend(struct usb_function *); static void ffs_func_resume(struct usb_function *); static int ffs_func_revmap_ep(struct ffs_function *func, u8 num); static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf); /* The endpoints structures *************************************************/ struct ffs_ep { struct usb_ep *ep; /* P: ffs->eps_lock */ struct usb_request *req; /* P: epfile->mutex */ /* [0]: full speed, [1]: high speed */ struct usb_endpoint_descriptor *descs[2]; u8 num; int status; /* P: epfile->mutex */ }; struct ffs_epfile { /* Protects ep->ep and ep->req. 
*/ struct mutex mutex; wait_queue_head_t wait; struct ffs_data *ffs; struct ffs_ep *ep; /* P: ffs->eps_lock */ struct dentry *dentry; char name[5]; unsigned char in; /* P: ffs->eps_lock */ unsigned char isoc; /* P: ffs->eps_lock */ unsigned char _pad; }; static int __must_check ffs_epfiles_create(struct ffs_data *ffs); static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count); static struct inode *__must_check ffs_sb_create_file(struct super_block *sb, const char *name, void *data, const struct file_operations *fops, struct dentry **dentry_p); /* Misc helper functions ****************************************************/ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) __attribute__((warn_unused_result, nonnull)); static char *ffs_prepare_buffer(const char * __user buf, size_t len) __attribute__((warn_unused_result, nonnull)); /* Control file aka ep0 *****************************************************/ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req) { struct ffs_data *ffs = req->context; complete_all(&ffs->ep0req_completion); } static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) { struct usb_request *req = ffs->ep0req; int ret; req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); spin_unlock_irq(&ffs->ev.waitq.lock); req->buf = data; req->length = len; INIT_COMPLETION(ffs->ep0req_completion); ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC); if (unlikely(ret < 0)) return ret; ret = wait_for_completion_interruptible(&ffs->ep0req_completion); if (unlikely(ret)) { usb_ep_dequeue(ffs->gadget->ep0, req); return -EINTR; } ffs->setup_state = FFS_NO_SETUP; return ffs->ep0req_status; } static int __ffs_ep0_stall(struct ffs_data *ffs) { if (ffs->ev.can_stall) { FVDBG("ep0 stall\n"); usb_ep_set_halt(ffs->gadget->ep0); ffs->setup_state = FFS_NO_SETUP; return -EL2HLT; } else { FDBG("bogus ep0 stall!\n"); return -ESRCH; } } static ssize_t ffs_ep0_write(struct file *file, const char 
__user *buf, size_t len, loff_t *ptr) { struct ffs_data *ffs = file->private_data; ssize_t ret; char *data; ENTER(); /* Fast check if setup was canceled */ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) return -EIDRM; /* Acquire mutex */ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); if (unlikely(ret < 0)) return ret; /* Check state */ switch (ffs->state) { case FFS_READ_DESCRIPTORS: case FFS_READ_STRINGS: /* Copy data */ if (unlikely(len < 16)) { ret = -EINVAL; break; } data = ffs_prepare_buffer(buf, len); if (unlikely(IS_ERR(data))) { ret = PTR_ERR(data); break; } /* Handle data */ if (ffs->state == FFS_READ_DESCRIPTORS) { FINFO("read descriptors"); ret = __ffs_data_got_descs(ffs, data, len); if (unlikely(ret < 0)) break; ffs->state = FFS_READ_STRINGS; ret = len; } else { FINFO("read strings"); ret = __ffs_data_got_strings(ffs, data, len); if (unlikely(ret < 0)) break; ret = ffs_epfiles_create(ffs); if (unlikely(ret)) { ffs->state = FFS_CLOSING; break; } ffs->state = FFS_ACTIVE; mutex_unlock(&ffs->mutex); ret = functionfs_ready_callback(ffs); if (unlikely(ret < 0)) { ffs->state = FFS_CLOSING; return ret; } set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); return len; } break; case FFS_ACTIVE: data = NULL; /* We're called from user space, we can use _irq * rather then _irqsave */ spin_lock_irq(&ffs->ev.waitq.lock); switch (FFS_SETUP_STATE(ffs)) { case FFS_SETUP_CANCELED: ret = -EIDRM; goto done_spin; case FFS_NO_SETUP: ret = -ESRCH; goto done_spin; case FFS_SETUP_PENDING: break; } /* FFS_SETUP_PENDING */ if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) { spin_unlock_irq(&ffs->ev.waitq.lock); ret = __ffs_ep0_stall(ffs); break; } /* FFS_SETUP_PENDING and not stall */ len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); spin_unlock_irq(&ffs->ev.waitq.lock); data = ffs_prepare_buffer(buf, len); if (unlikely(IS_ERR(data))) { ret = PTR_ERR(data); break; } spin_lock_irq(&ffs->ev.waitq.lock); /* We are guaranteed to be still in FFS_ACTIVE state * 
but the state of setup could have changed from * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need * to check for that. If that happened we copied data * from user space in vain but it's unlikely. */ /* For sure we are not in FFS_NO_SETUP since this is * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP * transition can be performed and it's protected by * mutex. */ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) { ret = -EIDRM; done_spin: spin_unlock_irq(&ffs->ev.waitq.lock); } else { /* unlocks spinlock */ ret = __ffs_ep0_queue_wait(ffs, data, len); } kfree(data); break; default: ret = -EBADFD; break; } mutex_unlock(&ffs->mutex); return ret; } static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, size_t n) { /* We are holding ffs->ev.waitq.lock and ffs->mutex and we need * to release them. */ struct usb_functionfs_event events[n]; unsigned i = 0; memset(events, 0, sizeof events); do { events[i].type = ffs->ev.types[i]; if (events[i].type == FUNCTIONFS_SETUP) { events[i].u.setup = ffs->ev.setup; ffs->setup_state = FFS_SETUP_PENDING; } } while (++i < n); if (n < ffs->ev.count) { ffs->ev.count -= n; memmove(ffs->ev.types, ffs->ev.types + n, ffs->ev.count * sizeof *ffs->ev.types); } else { ffs->ev.count = 0; } spin_unlock_irq(&ffs->ev.waitq.lock); mutex_unlock(&ffs->mutex); return unlikely(__copy_to_user(buf, events, sizeof events)) ? 
-EFAULT : sizeof events; } static ssize_t ffs_ep0_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) { struct ffs_data *ffs = file->private_data; char *data = NULL; size_t n; int ret; ENTER(); /* Fast check if setup was canceled */ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) return -EIDRM; /* Acquire mutex */ ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); if (unlikely(ret < 0)) return ret; /* Check state */ if (ffs->state != FFS_ACTIVE) { ret = -EBADFD; goto done_mutex; } /* We're called from user space, we can use _irq rather then * _irqsave */ spin_lock_irq(&ffs->ev.waitq.lock); switch (FFS_SETUP_STATE(ffs)) { case FFS_SETUP_CANCELED: ret = -EIDRM; break; case FFS_NO_SETUP: n = len / sizeof(struct usb_functionfs_event); if (unlikely(!n)) { ret = -EINVAL; break; } if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) { ret = -EAGAIN; break; } if (unlikely(wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, ffs->ev.count))) { ret = -EINTR; break; } return __ffs_ep0_read_events(ffs, buf, min(n, (size_t)ffs->ev.count)); case FFS_SETUP_PENDING: if (ffs->ev.setup.bRequestType & USB_DIR_IN) { spin_unlock_irq(&ffs->ev.waitq.lock); ret = __ffs_ep0_stall(ffs); goto done_mutex; } len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); spin_unlock_irq(&ffs->ev.waitq.lock); if (likely(len)) { data = kmalloc(len, GFP_KERNEL); if (unlikely(!data)) { ret = -ENOMEM; goto done_mutex; } } spin_lock_irq(&ffs->ev.waitq.lock); /* See ffs_ep0_write() */ if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) { ret = -EIDRM; break; } /* unlocks spinlock */ ret = __ffs_ep0_queue_wait(ffs, data, len); if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len))) ret = -EFAULT; goto done_mutex; default: ret = -EBADFD; break; } spin_unlock_irq(&ffs->ev.waitq.lock); done_mutex: mutex_unlock(&ffs->mutex); kfree(data); return ret; } static int ffs_ep0_open(struct inode *inode, struct file *file) { struct ffs_data *ffs = inode->i_private; ENTER(); if 
(unlikely(ffs->state == FFS_CLOSING)) return -EBUSY; file->private_data = ffs; ffs_data_opened(ffs); return 0; } static int ffs_ep0_release(struct inode *inode, struct file *file) { struct ffs_data *ffs = file->private_data; ENTER(); ffs_data_closed(ffs); return 0; } static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) { struct ffs_data *ffs = file->private_data; struct usb_gadget *gadget = ffs->gadget; long ret; ENTER(); if (code == FUNCTIONFS_INTERFACE_REVMAP) { struct ffs_function *func = ffs->func; ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV; } else if (gadget->ops->ioctl) { lock_kernel(); ret = gadget->ops->ioctl(gadget, code, value); unlock_kernel(); } else { ret = -ENOTTY; } return ret; } static const struct file_operations ffs_ep0_operations = { .owner = THIS_MODULE, .llseek = no_llseek, .open = ffs_ep0_open, .write = ffs_ep0_write, .read = ffs_ep0_read, .release = ffs_ep0_release, .unlocked_ioctl = ffs_ep0_ioctl, }; /* "Normal" endpoints operations ********************************************/ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) { ENTER(); if (likely(req->context)) { struct ffs_ep *ep = _ep->driver_data; ep->status = req->status ? req->status : req->actual; complete(req->context); } } static ssize_t ffs_epfile_io(struct file *file, char __user *buf, size_t len, int read) { struct ffs_epfile *epfile = file->private_data; struct ffs_ep *ep; char *data = NULL; ssize_t ret; int halt; goto first_try; do { spin_unlock_irq(&epfile->ffs->eps_lock); mutex_unlock(&epfile->mutex); first_try: /* Are we still active? */ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) { ret = -ENODEV; goto error; } /* Wait for endpoint to be enabled */ ep = epfile->ep; if (!ep) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; goto error; } if (unlikely(wait_event_interruptible (epfile->wait, (ep = epfile->ep)))) { ret = -EINTR; goto error; } } /* Do we halt? 
*/ halt = !read == !epfile->in; if (halt && epfile->isoc) { ret = -EINVAL; goto error; } /* Allocate & copy */ if (!halt && !data) { data = kzalloc(len, GFP_KERNEL); if (unlikely(!data)) return -ENOMEM; if (!read && unlikely(__copy_from_user(data, buf, len))) { ret = -EFAULT; goto error; } } /* We will be using request */ ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK); if (unlikely(ret)) goto error; /* We're called from user space, we can use _irq rather then * _irqsave */ spin_lock_irq(&epfile->ffs->eps_lock); /* While we were acquiring mutex endpoint got disabled * or changed? */ } while (unlikely(epfile->ep != ep)); /* Halt */ if (unlikely(halt)) { if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep)) usb_ep_set_halt(ep->ep); spin_unlock_irq(&epfile->ffs->eps_lock); ret = -EBADMSG; } else { /* Fire the request */ DECLARE_COMPLETION_ONSTACK(done); struct usb_request *req = ep->req; req->context = &done; req->complete = ffs_epfile_io_complete; req->buf = data; req->length = len; ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); spin_unlock_irq(&epfile->ffs->eps_lock); if (unlikely(ret < 0)) { /* nop */ } else if (unlikely(wait_for_completion_interruptible(&done))) { ret = -EINTR; usb_ep_dequeue(ep->ep, req); } else { ret = ep->status; if (read && ret > 0 && unlikely(copy_to_user(buf, data, ret))) ret = -EFAULT; } } mutex_unlock(&epfile->mutex); error: kfree(data); return ret; } static ssize_t ffs_epfile_write(struct file *file, const char __user *buf, size_t len, loff_t *ptr) { ENTER(); return ffs_epfile_io(file, (char __user *)buf, len, 0); } static ssize_t ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) { ENTER(); return ffs_epfile_io(file, buf, len, 1); } static int ffs_epfile_open(struct inode *inode, struct file *file) { struct ffs_epfile *epfile = inode->i_private; ENTER(); if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) return -ENODEV; file->private_data = epfile; ffs_data_opened(epfile->ffs); return 0; } static int 
ffs_epfile_release(struct inode *inode, struct file *file)
{
	struct ffs_epfile *epfile = inode->i_private;

	ENTER();

	/* Drop the open/ref counts taken by ffs_epfile_open(). */
	ffs_data_closed(epfile->ffs);

	return 0;
}

/* ioctl() on an endpoint file: FIFO status/flush, clear-halt, and the
 * endpoint-number reverse mapping.  Everything touching epfile->ep is
 * done under ffs->eps_lock (see the "P: ffs->eps_lock" annotation on
 * struct ffs_epfile). */
static long ffs_epfile_ioctl(struct file *file, unsigned code,
			     unsigned long value)
{
	struct ffs_epfile *epfile = file->private_data;
	int ret;

	ENTER();

	/* Endpoint files are only usable while the function is active. */
	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
		return -ENODEV;

	/* Called from user space, so plain _irq (not _irqsave) is fine. */
	spin_lock_irq(&epfile->ffs->eps_lock);
	if (likely(epfile->ep)) {
		switch (code) {
		case FUNCTIONFS_FIFO_STATUS:
			ret = usb_ep_fifo_status(epfile->ep->ep);
			break;
		case FUNCTIONFS_FIFO_FLUSH:
			usb_ep_fifo_flush(epfile->ep->ep);
			ret = 0;
			break;
		case FUNCTIONFS_CLEAR_HALT:
			ret = usb_ep_clear_halt(epfile->ep->ep);
			break;
		case FUNCTIONFS_ENDPOINT_REVMAP:
			ret = epfile->ep->num;
			break;
		default:
			ret = -ENOTTY;
		}
	} else {
		/* Endpoint got disabled while we were acquiring the lock. */
		ret = -ENODEV;
	}
	spin_unlock_irq(&epfile->ffs->eps_lock);

	return ret;
}

static const struct file_operations ffs_epfile_operations = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,

	.open =		ffs_epfile_open,
	.write =	ffs_epfile_write,
	.read =		ffs_epfile_read,
	.release =	ffs_epfile_release,
	.unlocked_ioctl =	ffs_epfile_ioctl,
};


/* File system and super block operations ***********************************/

/*
 * Mounting the filesystem creates a controller file, used first for
 * function configuration then later for event monitoring.
*/ static struct inode *__must_check ffs_sb_make_inode(struct super_block *sb, void *data, const struct file_operations *fops, const struct inode_operations *iops, struct ffs_file_perms *perms) { struct inode *inode; ENTER(); inode = new_inode(sb); if (likely(inode)) { struct timespec current_time = CURRENT_TIME; inode->i_mode = perms->mode; inode->i_uid = perms->uid; inode->i_gid = perms->gid; inode->i_atime = current_time; inode->i_mtime = current_time; inode->i_ctime = current_time; inode->i_private = data; if (fops) inode->i_fop = fops; if (iops) inode->i_op = iops; } return inode; } /* Create "regular" file */ static struct inode *ffs_sb_create_file(struct super_block *sb, const char *name, void *data, const struct file_operations *fops, struct dentry **dentry_p) { struct ffs_data *ffs = sb->s_fs_info; struct dentry *dentry; struct inode *inode; ENTER(); dentry = d_alloc_name(sb->s_root, name); if (unlikely(!dentry)) return NULL; inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms); if (unlikely(!inode)) { dput(dentry); return NULL; } d_add(dentry, inode); if (dentry_p) *dentry_p = dentry; return inode; } /* Super block */ static const struct super_operations ffs_sb_operations = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, }; struct ffs_sb_fill_data { struct ffs_file_perms perms; umode_t root_mode; const char *dev_name; }; static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) { struct ffs_sb_fill_data *data = _data; struct inode *inode; struct dentry *d; struct ffs_data *ffs; ENTER(); /* Initialize data */ ffs = ffs_data_new(); if (unlikely(!ffs)) goto enomem0; ffs->sb = sb; ffs->dev_name = data->dev_name; ffs->file_perms = data->perms; sb->s_fs_info = ffs; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = FUNCTIONFS_MAGIC; sb->s_op = &ffs_sb_operations; sb->s_time_gran = 1; /* Root inode */ data->perms.mode = data->root_mode; inode = ffs_sb_make_inode(sb, NULL, 
&simple_dir_operations, &simple_dir_inode_operations, &data->perms); if (unlikely(!inode)) goto enomem1; d = d_alloc_root(inode); if (unlikely(!d)) goto enomem2; sb->s_root = d; /* EP0 file */ if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, &ffs_ep0_operations, NULL))) goto enomem3; return 0; enomem3: dput(d); enomem2: iput(inode); enomem1: ffs_data_put(ffs); enomem0: return -ENOMEM; } static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) { ENTER(); if (!opts || !*opts) return 0; for (;;) { char *end, *eq, *comma; unsigned long value; /* Option limit */ comma = strchr(opts, ','); if (comma) *comma = 0; /* Value limit */ eq = strchr(opts, '='); if (unlikely(!eq)) { FERR("'=' missing in %s", opts); return -EINVAL; } *eq = 0; /* Parse value */ value = simple_strtoul(eq + 1, &end, 0); if (unlikely(*end != ',' && *end != 0)) { FERR("%s: invalid value: %s", opts, eq + 1); return -EINVAL; } /* Interpret option */ switch (eq - opts) { case 5: if (!memcmp(opts, "rmode", 5)) data->root_mode = (value & 0555) | S_IFDIR; else if (!memcmp(opts, "fmode", 5)) data->perms.mode = (value & 0666) | S_IFREG; else goto invalid; break; case 4: if (!memcmp(opts, "mode", 4)) { data->root_mode = (value & 0555) | S_IFDIR; data->perms.mode = (value & 0666) | S_IFREG; } else { goto invalid; } break; case 3: if (!memcmp(opts, "uid", 3)) data->perms.uid = value; else if (!memcmp(opts, "gid", 3)) data->perms.gid = value; else goto invalid; break; default: invalid: FERR("%s: invalid option", opts); return -EINVAL; } /* Next iteration */ if (!comma) break; opts = comma + 1; } return 0; } /* "mount -t functionfs dev_name /dev/function" ends up here */ static int ffs_fs_get_sb(struct file_system_type *t, int flags, const char *dev_name, void *opts, struct vfsmount *mnt) { struct ffs_sb_fill_data data = { .perms = { .mode = S_IFREG | 0600, .uid = 0, .gid = 0 }, .root_mode = S_IFDIR | 0500, }; int ret; ENTER(); ret = functionfs_check_dev_callback(dev_name); if (unlikely(ret < 0)) 
return ret; ret = ffs_fs_parse_opts(&data, opts); if (unlikely(ret < 0)) return ret; data.dev_name = dev_name; return get_sb_single(t, flags, &data, ffs_sb_fill, mnt); } static void ffs_fs_kill_sb(struct super_block *sb) { void *ptr; ENTER(); kill_litter_super(sb); ptr = xchg(&sb->s_fs_info, NULL); if (ptr) ffs_data_put(ptr); } static struct file_system_type ffs_fs_type = { .owner = THIS_MODULE, .name = "functionfs", .get_sb = ffs_fs_get_sb, .kill_sb = ffs_fs_kill_sb, }; /* Driver's main init/cleanup functions *************************************/ static int functionfs_init(void) { int ret; ENTER(); ret = register_filesystem(&ffs_fs_type); if (likely(!ret)) FINFO("file system registered"); else FERR("failed registering file system (%d)", ret); return ret; } static void functionfs_cleanup(void) { ENTER(); FINFO("unloading"); unregister_filesystem(&ffs_fs_type); } /* ffs_data and ffs_function construction and destruction code **************/ static void ffs_data_clear(struct ffs_data *ffs); static void ffs_data_reset(struct ffs_data *ffs); static void ffs_data_get(struct ffs_data *ffs) { ENTER(); atomic_inc(&ffs->ref); } static void ffs_data_opened(struct ffs_data *ffs) { ENTER(); atomic_inc(&ffs->ref); atomic_inc(&ffs->opened); } static void ffs_data_put(struct ffs_data *ffs) { ENTER(); if (unlikely(atomic_dec_and_test(&ffs->ref))) { FINFO("%s(): freeing", __func__); ffs_data_clear(ffs); BUG_ON(mutex_is_locked(&ffs->mutex) || spin_is_locked(&ffs->ev.waitq.lock) || waitqueue_active(&ffs->ev.waitq) || waitqueue_active(&ffs->ep0req_completion.wait)); kfree(ffs); } } static void ffs_data_closed(struct ffs_data *ffs) { ENTER(); if (atomic_dec_and_test(&ffs->opened)) { ffs->state = FFS_CLOSING; ffs_data_reset(ffs); } ffs_data_put(ffs); } static struct ffs_data *ffs_data_new(void) { struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); if (unlikely(!ffs)) return 0; ENTER(); atomic_set(&ffs->ref, 1); atomic_set(&ffs->opened, 0); ffs->state = FFS_READ_DESCRIPTORS; 
mutex_init(&ffs->mutex); spin_lock_init(&ffs->eps_lock); init_waitqueue_head(&ffs->ev.waitq); init_completion(&ffs->ep0req_completion); /* XXX REVISIT need to update it in some places, or do we? */ ffs->ev.can_stall = 1; return ffs; } static void ffs_data_clear(struct ffs_data *ffs) { ENTER(); if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags)) functionfs_closed_callback(ffs); BUG_ON(ffs->gadget); if (ffs->epfiles) ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); kfree(ffs->raw_descs); kfree(ffs->raw_strings); kfree(ffs->stringtabs); } static void ffs_data_reset(struct ffs_data *ffs) { ENTER(); ffs_data_clear(ffs); ffs->epfiles = NULL; ffs->raw_descs = NULL; ffs->raw_strings = NULL; ffs->stringtabs = NULL; ffs->raw_descs_length = 0; ffs->raw_fs_descs_length = 0; ffs->fs_descs_count = 0; ffs->hs_descs_count = 0; ffs->strings_count = 0; ffs->interfaces_count = 0; ffs->eps_count = 0; ffs->ev.count = 0; ffs->state = FFS_READ_DESCRIPTORS; ffs->setup_state = FFS_NO_SETUP; ffs->flags = 0; } static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) { unsigned i, count; ENTER(); if (WARN_ON(ffs->state != FFS_ACTIVE || test_and_set_bit(FFS_FL_BOUND, &ffs->flags))) return -EBADFD; ffs_data_get(ffs); ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); if (unlikely(!ffs->ep0req)) return -ENOMEM; ffs->ep0req->complete = ffs_ep0_complete; ffs->ep0req->context = ffs; /* Get strings identifiers */ for (count = ffs->strings_count, i = 0; i < count; ++i) { struct usb_gadget_strings **lang; int id = usb_string_id(cdev); if (unlikely(id < 0)) { usb_ep_free_request(cdev->gadget->ep0, ffs->ep0req); ffs->ep0req = NULL; return id; } lang = ffs->stringtabs; do { (*lang)->strings[i].id = id; ++lang; } while (*lang); } ffs->gadget = cdev->gadget; return 0; } static void functionfs_unbind(struct ffs_data *ffs) { ENTER(); if (!WARN_ON(!ffs->gadget)) { usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); ffs->ep0req = NULL; ffs->gadget = NULL; 
ffs_data_put(ffs); } } static int ffs_epfiles_create(struct ffs_data *ffs) { struct ffs_epfile *epfile, *epfiles; unsigned i, count; ENTER(); count = ffs->eps_count; epfiles = kzalloc(count * sizeof *epfiles, GFP_KERNEL); if (!epfiles) return -ENOMEM; epfile = epfiles; for (i = 1; i <= count; ++i, ++epfile) { epfile->ffs = ffs; mutex_init(&epfile->mutex); init_waitqueue_head(&epfile->wait); sprintf(epfiles->name, "ep%u", i); if (!unlikely(ffs_sb_create_file(ffs->sb, epfiles->name, epfile, &ffs_epfile_operations, &epfile->dentry))) { ffs_epfiles_destroy(epfiles, i - 1); return -ENOMEM; } } ffs->epfiles = epfiles; return 0; } static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) { struct ffs_epfile *epfile = epfiles; ENTER(); for (; count; --count, ++epfile) { BUG_ON(mutex_is_locked(&epfile->mutex) || waitqueue_active(&epfile->wait)); if (epfile->dentry) { d_delete(epfile->dentry); dput(epfile->dentry); epfile->dentry = NULL; } } kfree(epfiles); } static int functionfs_add(struct usb_composite_dev *cdev, struct usb_configuration *c, struct ffs_data *ffs) { struct ffs_function *func; int ret; ENTER(); func = kzalloc(sizeof *func, GFP_KERNEL); if (unlikely(!func)) return -ENOMEM; func->function.name = "Function FS Gadget"; func->function.strings = ffs->stringtabs; func->function.bind = ffs_func_bind; func->function.unbind = ffs_func_unbind; func->function.set_alt = ffs_func_set_alt; /*func->function.get_alt = ffs_func_get_alt;*/ func->function.disable = ffs_func_disable; func->function.setup = ffs_func_setup; func->function.suspend = ffs_func_suspend; func->function.resume = ffs_func_resume; func->conf = c; func->gadget = cdev->gadget; func->ffs = ffs; ffs_data_get(ffs); ret = usb_add_function(c, &func->function); if (unlikely(ret)) ffs_func_free(func); return ret; } static void ffs_func_free(struct ffs_function *func) { ENTER(); ffs_data_put(func->ffs); kfree(func->eps); /* eps and interfaces_nums are allocated in the same chunk so * only one 
free is required. Descriptors are also allocated * in the same chunk. */ kfree(func); } static void ffs_func_eps_disable(struct ffs_function *func) { struct ffs_ep *ep = func->eps; struct ffs_epfile *epfile = func->ffs->epfiles; unsigned count = func->ffs->eps_count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); do { /* pending requests get nuked */ if (likely(ep->ep)) usb_ep_disable(ep->ep); epfile->ep = NULL; ++ep; ++epfile; } while (--count); spin_unlock_irqrestore(&func->ffs->eps_lock, flags); } static int ffs_func_eps_enable(struct ffs_function *func) { struct ffs_data *ffs = func->ffs; struct ffs_ep *ep = func->eps; struct ffs_epfile *epfile = ffs->epfiles; unsigned count = ffs->eps_count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); do { struct usb_endpoint_descriptor *ds; ds = ep->descs[ep->descs[1] ? 1 : 0]; ep->ep->driver_data = ep; ret = usb_ep_enable(ep->ep, ds); if (likely(!ret)) { epfile->ep = ep; epfile->in = usb_endpoint_dir_in(ds); epfile->isoc = usb_endpoint_xfer_isoc(ds); } else { break; } wake_up(&epfile->wait); ++ep; ++epfile; } while (--count); spin_unlock_irqrestore(&func->ffs->eps_lock, flags); return ret; } /* Parsing and building descriptors and strings *****************************/ /* This validates if data pointed by data is a valid USB descriptor as * well as record how many interfaces, endpoints and strings are * required by given configuration. Returns address afther the * descriptor or NULL if data is invalid. 
*/ enum ffs_entity_type { FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT }; typedef int (*ffs_entity_callback)(enum ffs_entity_type entity, u8 *valuep, struct usb_descriptor_header *desc, void *priv); static int __must_check ffs_do_desc(char *data, unsigned len, ffs_entity_callback entity, void *priv) { struct usb_descriptor_header *_ds = (void *)data; u8 length; int ret; ENTER(); /* At least two bytes are required: length and type */ if (len < 2) { FVDBG("descriptor too short"); return -EINVAL; } /* If we have at least as many bytes as the descriptor takes? */ length = _ds->bLength; if (len < length) { FVDBG("descriptor longer then available data"); return -EINVAL; } #define __entity_check_INTERFACE(val) 1 #define __entity_check_STRING(val) (val) #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK) #define __entity(type, val) do { \ FVDBG("entity " #type "(%02x)", (val)); \ if (unlikely(!__entity_check_ ##type(val))) { \ FVDBG("invalid entity's value"); \ return -EINVAL; \ } \ ret = entity(FFS_ ##type, &val, _ds, priv); \ if (unlikely(ret < 0)) { \ FDBG("entity " #type "(%02x); ret = %d", \ (val), ret); \ return ret; \ } \ } while (0) /* Parse descriptor depending on type. 
*/ switch (_ds->bDescriptorType) { case USB_DT_DEVICE: case USB_DT_CONFIG: case USB_DT_STRING: case USB_DT_DEVICE_QUALIFIER: /* function can't have any of those */ FVDBG("descriptor reserved for gadget: %d", _ds->bDescriptorType); return -EINVAL; case USB_DT_INTERFACE: { struct usb_interface_descriptor *ds = (void *)_ds; FVDBG("interface descriptor"); if (length != sizeof *ds) goto inv_length; __entity(INTERFACE, ds->bInterfaceNumber); if (ds->iInterface) __entity(STRING, ds->iInterface); } break; case USB_DT_ENDPOINT: { struct usb_endpoint_descriptor *ds = (void *)_ds; FVDBG("endpoint descriptor"); if (length != USB_DT_ENDPOINT_SIZE && length != USB_DT_ENDPOINT_AUDIO_SIZE) goto inv_length; __entity(ENDPOINT, ds->bEndpointAddress); } break; case USB_DT_OTG: if (length != sizeof(struct usb_otg_descriptor)) goto inv_length; break; case USB_DT_INTERFACE_ASSOCIATION: { struct usb_interface_assoc_descriptor *ds = (void *)_ds; FVDBG("interface association descriptor"); if (length != sizeof *ds) goto inv_length; if (ds->iFunction) __entity(STRING, ds->iFunction); } break; case USB_DT_OTHER_SPEED_CONFIG: case USB_DT_INTERFACE_POWER: case USB_DT_DEBUG: case USB_DT_SECURITY: case USB_DT_CS_RADIO_CONTROL: /* TODO */ FVDBG("unimplemented descriptor: %d", _ds->bDescriptorType); return -EINVAL; default: /* We should never be here */ FVDBG("unknown descriptor: %d", _ds->bDescriptorType); return -EINVAL; inv_length: FVDBG("invalid length: %d (descriptor %d)", _ds->bLength, _ds->bDescriptorType); return -EINVAL; } #undef __entity #undef __entity_check_DESCRIPTOR #undef __entity_check_INTERFACE #undef __entity_check_STRING #undef __entity_check_ENDPOINT return length; } static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, ffs_entity_callback entity, void *priv) { const unsigned _len = len; unsigned long num = 0; ENTER(); for (;;) { int ret; if (num == count) data = NULL; /* Record "descriptor" entitny */ ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, 
priv); if (unlikely(ret < 0)) { FDBG("entity DESCRIPTOR(%02lx); ret = %d", num, ret); return ret; } if (!data) return _len - len; ret = ffs_do_desc(data, len, entity, priv); if (unlikely(ret < 0)) { FDBG("%s returns %d", __func__, ret); return ret; } len -= ret; data += ret; ++num; } } static int __ffs_data_do_entity(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) { struct ffs_data *ffs = priv; ENTER(); switch (type) { case FFS_DESCRIPTOR: break; case FFS_INTERFACE: /* Interfaces are indexed from zero so if we * encountered interface "n" then there are at least * "n+1" interfaces. */ if (*valuep >= ffs->interfaces_count) ffs->interfaces_count = *valuep + 1; break; case FFS_STRING: /* Strings are indexed from 1 (0 is magic ;) reserved * for languages list or some such) */ if (*valuep > ffs->strings_count) ffs->strings_count = *valuep; break; case FFS_ENDPOINT: /* Endpoints are indexed from 1 as well. */ if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count) ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK); break; } return 0; } static int __ffs_data_got_descs(struct ffs_data *ffs, char *const _data, size_t len) { unsigned fs_count, hs_count; int fs_len, ret = -EINVAL; char *data = _data; ENTER(); if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC || get_unaligned_le32(data + 4) != len)) goto error; fs_count = get_unaligned_le32(data + 8); hs_count = get_unaligned_le32(data + 12); if (!fs_count && !hs_count) goto einval; data += 16; len -= 16; if (likely(fs_count)) { fs_len = ffs_do_descs(fs_count, data, len, __ffs_data_do_entity, ffs); if (unlikely(fs_len < 0)) { ret = fs_len; goto error; } data += fs_len; len -= fs_len; } else { fs_len = 0; } if (likely(hs_count)) { ret = ffs_do_descs(hs_count, data, len, __ffs_data_do_entity, ffs); if (unlikely(ret < 0)) goto error; } else { ret = 0; } if (unlikely(len != ret)) goto einval; ffs->raw_fs_descs_length = fs_len; ffs->raw_descs_length = fs_len + ret; 
ffs->raw_descs = _data; ffs->fs_descs_count = fs_count; ffs->hs_descs_count = hs_count; return 0; einval: ret = -EINVAL; error: kfree(_data); return ret; } static int __ffs_data_got_strings(struct ffs_data *ffs, char *const _data, size_t len) { u32 str_count, needed_count, lang_count; struct usb_gadget_strings **stringtabs, *t; struct usb_string *strings, *s; const char *data = _data; ENTER(); if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || get_unaligned_le32(data + 4) != len)) goto error; str_count = get_unaligned_le32(data + 8); lang_count = get_unaligned_le32(data + 12); /* if one is zero the other must be zero */ if (unlikely(!str_count != !lang_count)) goto error; /* Do we have at least as many strings as descriptors need? */ needed_count = ffs->strings_count; if (unlikely(str_count < needed_count)) goto error; /* If we don't need any strings just return and free all * memory */ if (!needed_count) { kfree(_data); return 0; } /* Allocate */ { /* Allocate everything in one chunk so there's less * maintanance. 
*/ struct { struct usb_gadget_strings *stringtabs[lang_count + 1]; struct usb_gadget_strings stringtab[lang_count]; struct usb_string strings[lang_count*(needed_count+1)]; } *d; unsigned i = 0; d = kmalloc(sizeof *d, GFP_KERNEL); if (unlikely(!d)) { kfree(_data); return -ENOMEM; } stringtabs = d->stringtabs; t = d->stringtab; i = lang_count; do { *stringtabs++ = t++; } while (--i); *stringtabs = NULL; stringtabs = d->stringtabs; t = d->stringtab; s = d->strings; strings = s; } /* For each language */ data += 16; len -= 16; do { /* lang_count > 0 so we can use do-while */ unsigned needed = needed_count; if (unlikely(len < 3)) goto error_free; t->language = get_unaligned_le16(data); t->strings = s; ++t; data += 2; len -= 2; /* For each string */ do { /* str_count > 0 so we can use do-while */ size_t length = strnlen(data, len); if (unlikely(length == len)) goto error_free; /* user may provide more strings then we need, * if that's the case we simply ingore the * rest */ if (likely(needed)) { /* s->id will be set while adding * function to configuration so for * now just leave garbage here. */ s->s = data; --needed; ++s; } data += length + 1; len -= length + 1; } while (--str_count); s->id = 0; /* terminator */ s->s = NULL; ++s; } while (--lang_count); /* Some garbage left? */ if (unlikely(len)) goto error_free; /* Done! */ ffs->stringtabs = stringtabs; ffs->raw_strings = _data; return 0; error_free: kfree(stringtabs); error: kfree(_data); return -EINVAL; } /* Events handling and management *******************************************/ static void __ffs_event_add(struct ffs_data *ffs, enum usb_functionfs_event_type type) { enum usb_functionfs_event_type rem_type1, rem_type2 = type; int neg = 0; /* Abort any unhandled setup */ /* We do not need to worry about some cmpxchg() changing value * of ffs->setup_state without holding the lock because when * state is FFS_SETUP_PENDING cmpxchg() in several places in * the source does nothing. 
*/ if (ffs->setup_state == FFS_SETUP_PENDING) ffs->setup_state = FFS_SETUP_CANCELED; switch (type) { case FUNCTIONFS_RESUME: rem_type2 = FUNCTIONFS_SUSPEND; /* FALL THGOUTH */ case FUNCTIONFS_SUSPEND: case FUNCTIONFS_SETUP: rem_type1 = type; /* discard all similar events */ break; case FUNCTIONFS_BIND: case FUNCTIONFS_UNBIND: case FUNCTIONFS_DISABLE: case FUNCTIONFS_ENABLE: /* discard everything other then power management. */ rem_type1 = FUNCTIONFS_SUSPEND; rem_type2 = FUNCTIONFS_RESUME; neg = 1; break; default: BUG(); } { u8 *ev = ffs->ev.types, *out = ev; unsigned n = ffs->ev.count; for (; n; --n, ++ev) if ((*ev == rem_type1 || *ev == rem_type2) == neg) *out++ = *ev; else FVDBG("purging event %d", *ev); ffs->ev.count = out - ffs->ev.types; } FVDBG("adding event %d", type); ffs->ev.types[ffs->ev.count++] = type; wake_up_locked(&ffs->ev.waitq); } static void ffs_event_add(struct ffs_data *ffs, enum usb_functionfs_event_type type) { unsigned long flags; spin_lock_irqsave(&ffs->ev.waitq.lock, flags); __ffs_event_add(ffs, type); spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); } /* Bind/unbind USB function hooks *******************************************/ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) { struct usb_endpoint_descriptor *ds = (void *)desc; struct ffs_function *func = priv; struct ffs_ep *ffs_ep; /* If hs_descriptors is not NULL then we are reading hs * descriptors now */ const int isHS = func->function.hs_descriptors != NULL; unsigned idx; if (type != FFS_DESCRIPTOR) return 0; if (isHS) func->function.hs_descriptors[(long)valuep] = desc; else func->function.descriptors[(long)valuep] = desc; if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) return 0; idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1; ffs_ep = func->eps + idx; if (unlikely(ffs_ep->descs[isHS])) { FVDBG("two %sspeed descriptors for EP %d", isHS ? 
"high" : "full", ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); return -EINVAL; } ffs_ep->descs[isHS] = ds; ffs_dump_mem(": Original ep desc", ds, ds->bLength); if (ffs_ep->ep) { ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress; if (!ds->wMaxPacketSize) ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize; } else { struct usb_request *req; struct usb_ep *ep; FVDBG("autoconfig"); ep = usb_ep_autoconfig(func->gadget, ds); if (unlikely(!ep)) return -ENOTSUPP; ep->driver_data = func->eps + idx;; req = usb_ep_alloc_request(ep, GFP_KERNEL); if (unlikely(!req)) return -ENOMEM; ffs_ep->ep = ep; ffs_ep->req = req; func->eps_revmap[ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK] = idx + 1; } ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength); return 0; } static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep, struct usb_descriptor_header *desc, void *priv) { struct ffs_function *func = priv; unsigned idx; u8 newValue; switch (type) { default: case FFS_DESCRIPTOR: /* Handled in previous pass by __ffs_func_bind_do_descs() */ return 0; case FFS_INTERFACE: idx = *valuep; if (func->interfaces_nums[idx] < 0) { int id = usb_interface_id(func->conf, &func->function); if (unlikely(id < 0)) return id; func->interfaces_nums[idx] = id; } newValue = func->interfaces_nums[idx]; break; case FFS_STRING: /* String' IDs are allocated when fsf_data is bound to cdev */ newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id; break; case FFS_ENDPOINT: /* USB_DT_ENDPOINT are handled in * __ffs_func_bind_do_descs(). */ if (desc->bDescriptorType == USB_DT_ENDPOINT) return 0; idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1; if (unlikely(!func->eps[idx].ep)) return -EINVAL; { struct usb_endpoint_descriptor **descs; descs = func->eps[idx].descs; newValue = descs[descs[0] ? 
0 : 1]->bEndpointAddress; } break; } FVDBG("%02x -> %02x", *valuep, newValue); *valuep = newValue; return 0; } static int ffs_func_bind(struct usb_configuration *c, struct usb_function *f) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; const int full = !!func->ffs->fs_descs_count; const int high = gadget_is_dualspeed(func->gadget) && func->ffs->hs_descs_count; int ret; /* Make it a single chunk, less management later on */ struct { struct ffs_ep eps[ffs->eps_count]; struct usb_descriptor_header *fs_descs[full ? ffs->fs_descs_count + 1 : 0]; struct usb_descriptor_header *hs_descs[high ? ffs->hs_descs_count + 1 : 0]; short inums[ffs->interfaces_count]; char raw_descs[high ? ffs->raw_descs_length : ffs->raw_fs_descs_length]; } *data; ENTER(); /* Only high speed but not supported by gadget? */ if (unlikely(!(full | high))) return -ENOTSUPP; /* Allocate */ data = kmalloc(sizeof *data, GFP_KERNEL); if (unlikely(!data)) return -ENOMEM; /* Zero */ memset(data->eps, 0, sizeof data->eps); memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs); memset(data->inums, 0xff, sizeof data->inums); for (ret = ffs->eps_count; ret; --ret) data->eps[ret].num = -1; /* Save pointers */ func->eps = data->eps; func->interfaces_nums = data->inums; /* Go throught all the endpoint descriptors and allocate * endpoints first, so that later we can rewrite the endpoint * numbers without worying that it may be described later on. */ if (likely(full)) { func->function.descriptors = data->fs_descs; ret = ffs_do_descs(ffs->fs_descs_count, data->raw_descs, sizeof data->raw_descs, __ffs_func_bind_do_descs, func); if (unlikely(ret < 0)) goto error; } else { ret = 0; } if (likely(high)) { func->function.hs_descriptors = data->hs_descs; ret = ffs_do_descs(ffs->hs_descs_count, data->raw_descs + ret, (sizeof data->raw_descs) - ret, __ffs_func_bind_do_descs, func); } /* Now handle interface numbers allocation and interface and * enpoint numbers rewritting. 
We can do that in one go * now. */ ret = ffs_do_descs(ffs->fs_descs_count + (high ? ffs->hs_descs_count : 0), data->raw_descs, sizeof data->raw_descs, __ffs_func_bind_do_nums, func); if (unlikely(ret < 0)) goto error; /* And we're done */ ffs_event_add(ffs, FUNCTIONFS_BIND); return 0; error: /* XXX Do we need to release all claimed endpoints here? */ return ret; } /* Other USB function hooks *************************************************/ static void ffs_func_unbind(struct usb_configuration *c, struct usb_function *f) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; ENTER(); if (ffs->func == func) { ffs_func_eps_disable(func); ffs->func = NULL; } ffs_event_add(ffs, FUNCTIONFS_UNBIND); ffs_func_free(func); } static int ffs_func_set_alt(struct usb_function *f, unsigned interface, unsigned alt) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; int ret = 0, intf; if (alt != (unsigned)-1) { intf = ffs_func_revmap_intf(func, interface); if (unlikely(intf < 0)) return intf; } if (ffs->func) ffs_func_eps_disable(ffs->func); if (ffs->state != FFS_ACTIVE) return -ENODEV; if (alt == (unsigned)-1) { ffs->func = NULL; ffs_event_add(ffs, FUNCTIONFS_DISABLE); return 0; } ffs->func = func; ret = ffs_func_eps_enable(func); if (likely(ret >= 0)) ffs_event_add(ffs, FUNCTIONFS_ENABLE); return ret; } static void ffs_func_disable(struct usb_function *f) { ffs_func_set_alt(f, 0, (unsigned)-1); } static int ffs_func_setup(struct usb_function *f, const struct usb_ctrlrequest *creq) { struct ffs_function *func = ffs_func_from_usb(f); struct ffs_data *ffs = func->ffs; unsigned long flags; int ret; ENTER(); FVDBG("creq->bRequestType = %02x", creq->bRequestType); FVDBG("creq->bRequest = %02x", creq->bRequest); FVDBG("creq->wValue = %04x", le16_to_cpu(creq->wValue)); FVDBG("creq->wIndex = %04x", le16_to_cpu(creq->wIndex)); FVDBG("creq->wLength = %04x", le16_to_cpu(creq->wLength)); /* Most requests directed to interface 
go throught here * (notable exceptions are set/get interface) so we need to * handle them. All other either handled by composite or * passed to usb_configuration->setup() (if one is set). No * matter, we will handle requests directed to endpoint here * as well (as it's straightforward) but what to do with any * other request? */ if (ffs->state != FFS_ACTIVE) return -ENODEV; switch (creq->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex)); if (unlikely(ret < 0)) return ret; break; case USB_RECIP_ENDPOINT: ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex)); if (unlikely(ret < 0)) return ret; break; default: return -EOPNOTSUPP; } spin_lock_irqsave(&ffs->ev.waitq.lock, flags); ffs->ev.setup = *creq; ffs->ev.setup.wIndex = cpu_to_le16(ret); __ffs_event_add(ffs, FUNCTIONFS_SETUP); spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); return 0; } static void ffs_func_suspend(struct usb_function *f) { ENTER(); ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND); } static void ffs_func_resume(struct usb_function *f) { ENTER(); ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME); } /* Enpoint and interface numbers reverse mapping ****************************/ static int ffs_func_revmap_ep(struct ffs_function *func, u8 num) { num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK]; return num ? num : -EDOM; } static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf) { short *nums = func->interfaces_nums; unsigned count = func->ffs->interfaces_count; for (; count; --count, ++nums) { if (*nums >= 0 && *nums == intf) return nums - func->interfaces_nums; } return -EDOM; } /* Misc helper functions ****************************************************/ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) { return nonblock ? likely(mutex_trylock(mutex)) ? 
0 : -EAGAIN : mutex_lock_interruptible(mutex); } static char *ffs_prepare_buffer(const char * __user buf, size_t len) { char *data; if (unlikely(!len)) return NULL; data = kmalloc(len, GFP_KERNEL); if (unlikely(!data)) return ERR_PTR(-ENOMEM); if (unlikely(__copy_from_user(data, buf, len))) { kfree(data); return ERR_PTR(-EFAULT); } FVDBG("Buffer from user space:"); ffs_dump_mem("", data, len); return data; }
gpl-2.0
joaoluizdhv/controlearduino
arduino-1.0.3/hardware/arduino/firmwares/wifishield/wifiHD/src/SOFTWARE_FRAMEWORK/BOARDS/EVK1105/led.c
680
10679
/* This source file is part of the ATMEL AVR-UC3-SoftwareFramework-1.7.0 Release */ /*This file is prepared for Doxygen automatic documentation generation.*/ /*! \file ********************************************************************* * * \brief AT32UC3A EVK1105 board LEDs support package. * * This file contains definitions and services related to the LED features of * the EVK1105 board. * * - Compiler: IAR EWAVR32 and GNU GCC for AVR32 * - Supported devices: All AVR32 AT32UC3A devices can be used. * - AppNote: * * \author Atmel Corporation: http://www.atmel.com \n * Support and FAQ: http://support.atmel.no/ * ******************************************************************************/ /* Copyright (c) 2009 Atmel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. The name of Atmel may not be used to endorse or promote products derived * from this software without specific prior written permission. * * 4. This software may only be redistributed and used in connection with an Atmel * AVR product. * * THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE * EXPRESSLY AND SPECIFICALLY DISCLAIMED. 
IN NO EVENT SHALL ATMEL BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE * */ #include <avr32/io.h> #include "preprocessor.h" #include "compiler.h" #include "evk1105.h" #include "led.h" //! Structure describing LED hardware connections. typedef const struct { struct { U32 PORT; //!< LED GPIO port. U32 PIN_MASK; //!< Bit-mask of LED pin in GPIO port. } GPIO; //!< LED GPIO descriptor. struct { S32 CHANNEL; //!< LED PWM channel (< 0 if N/A). S32 FUNCTION; //!< LED pin PWM function (< 0 if N/A). } PWM; //!< LED PWM descriptor. } tLED_DESCRIPTOR; //! Hardware descriptors of all LEDs. static tLED_DESCRIPTOR LED_DESCRIPTOR[LED_COUNT] = { #define INSERT_LED_DESCRIPTOR(LED_NO, unused) \ { \ {LED##LED_NO##_GPIO / 32, 1 << (LED##LED_NO##_GPIO % 32)},\ {LED##LED_NO##_PWM, LED##LED_NO##_PWM_FUNCTION } \ }, MREPEAT(LED_COUNT, INSERT_LED_DESCRIPTOR, ~) #undef INSERT_LED_DESCRIPTOR }; //! Saved state of all LEDs. static volatile U32 LED_State = (1 << LED_COUNT) - 1; U32 LED_Read_Display(void) { return LED_State; } void LED_Display(U32 leds) { // Use the LED descriptors to get the connections of a given LED to the MCU. tLED_DESCRIPTOR *led_descriptor; volatile avr32_gpio_port_t *led_gpio_port; // Make sure only existing LEDs are specified. leds &= (1 << LED_COUNT) - 1; // Update the saved state of all LEDs with the requested changes. LED_State = leds; // For all LEDs... for (led_descriptor = &LED_DESCRIPTOR[0]; led_descriptor < LED_DESCRIPTOR + LED_COUNT; led_descriptor++) { // Set the LED to the requested state. 
led_gpio_port = &AVR32_GPIO.port[led_descriptor->GPIO.PORT]; if (leds & 1) { led_gpio_port->ovrc = led_descriptor->GPIO.PIN_MASK; } else { led_gpio_port->ovrs = led_descriptor->GPIO.PIN_MASK; } led_gpio_port->oders = led_descriptor->GPIO.PIN_MASK; led_gpio_port->gpers = led_descriptor->GPIO.PIN_MASK; leds >>= 1; } } U32 LED_Read_Display_Mask(U32 mask) { return Rd_bits(LED_State, mask); } void LED_Display_Mask(U32 mask, U32 leds) { // Use the LED descriptors to get the connections of a given LED to the MCU. tLED_DESCRIPTOR *led_descriptor = &LED_DESCRIPTOR[0] - 1; volatile avr32_gpio_port_t *led_gpio_port; U8 led_shift; // Make sure only existing LEDs are specified. mask &= (1 << LED_COUNT) - 1; // Update the saved state of all LEDs with the requested changes. Wr_bits(LED_State, mask, leds); // While there are specified LEDs left to manage... while (mask) { // Select the next specified LED and set it to the requested state. led_shift = 1 + ctz(mask); led_descriptor += led_shift; led_gpio_port = &AVR32_GPIO.port[led_descriptor->GPIO.PORT]; leds >>= led_shift - 1; if (leds & 1) { led_gpio_port->ovrc = led_descriptor->GPIO.PIN_MASK; } else { led_gpio_port->ovrs = led_descriptor->GPIO.PIN_MASK; } led_gpio_port->oders = led_descriptor->GPIO.PIN_MASK; led_gpio_port->gpers = led_descriptor->GPIO.PIN_MASK; leds >>= 1; mask >>= led_shift; } } Bool LED_Test(U32 leds) { return Tst_bits(LED_State, leds); } void LED_Off(U32 leds) { // Use the LED descriptors to get the connections of a given LED to the MCU. tLED_DESCRIPTOR *led_descriptor = &LED_DESCRIPTOR[0] - 1; volatile avr32_gpio_port_t *led_gpio_port; U8 led_shift; // Make sure only existing LEDs are specified. leds &= (1 << LED_COUNT) - 1; // Update the saved state of all LEDs with the requested changes. Clr_bits(LED_State, leds); // While there are specified LEDs left to manage... while (leds) { // Select the next specified LED and turn it off. 
led_shift = 1 + ctz(leds); led_descriptor += led_shift; led_gpio_port = &AVR32_GPIO.port[led_descriptor->GPIO.PORT]; led_gpio_port->ovrs = led_descriptor->GPIO.PIN_MASK; led_gpio_port->oders = led_descriptor->GPIO.PIN_MASK; led_gpio_port->gpers = led_descriptor->GPIO.PIN_MASK; leds >>= led_shift; } } void LED_On(U32 leds) { // Use the LED descriptors to get the connections of a given LED to the MCU. tLED_DESCRIPTOR *led_descriptor = &LED_DESCRIPTOR[0] - 1; volatile avr32_gpio_port_t *led_gpio_port; U8 led_shift; // Make sure only existing LEDs are specified. leds &= (1 << LED_COUNT) - 1; // Update the saved state of all LEDs with the requested changes. Set_bits(LED_State, leds); // While there are specified LEDs left to manage... while (leds) { // Select the next specified LED and turn it on. led_shift = 1 + ctz(leds); led_descriptor += led_shift; led_gpio_port = &AVR32_GPIO.port[led_descriptor->GPIO.PORT]; led_gpio_port->ovrc = led_descriptor->GPIO.PIN_MASK; led_gpio_port->oders = led_descriptor->GPIO.PIN_MASK; led_gpio_port->gpers = led_descriptor->GPIO.PIN_MASK; leds >>= led_shift; } } void LED_Toggle(U32 leds) { // Use the LED descriptors to get the connections of a given LED to the MCU. tLED_DESCRIPTOR *led_descriptor = &LED_DESCRIPTOR[0] - 1; volatile avr32_gpio_port_t *led_gpio_port; U8 led_shift; // Make sure only existing LEDs are specified. leds &= (1 << LED_COUNT) - 1; // Update the saved state of all LEDs with the requested changes. Tgl_bits(LED_State, leds); // While there are specified LEDs left to manage... while (leds) { // Select the next specified LED and toggle it. 
led_shift = 1 + ctz(leds); led_descriptor += led_shift; led_gpio_port = &AVR32_GPIO.port[led_descriptor->GPIO.PORT]; led_gpio_port->ovrt = led_descriptor->GPIO.PIN_MASK; led_gpio_port->oders = led_descriptor->GPIO.PIN_MASK; led_gpio_port->gpers = led_descriptor->GPIO.PIN_MASK; leds >>= led_shift; } } U32 LED_Read_Display_Field(U32 field) { return Rd_bitfield(LED_State, field); } void LED_Display_Field(U32 field, U32 leds) { // Move the bit-field to the appropriate position for the bit-mask. LED_Display_Mask(field, leds << ctz(field)); } U8 LED_Get_Intensity(U32 led) { tLED_DESCRIPTOR *led_descriptor; // Check that the argument value is valid. led = ctz(led); led_descriptor = &LED_DESCRIPTOR[led]; if (led >= LED_COUNT || led_descriptor->PWM.CHANNEL < 0) return 0; // Return the duty cycle value if the LED PWM channel is enabled, else 0. return (AVR32_PWM.sr & (1 << led_descriptor->PWM.CHANNEL)) ? AVR32_PWM.channel[led_descriptor->PWM.CHANNEL].cdty : 0; } void LED_Set_Intensity(U32 leds, U8 intensity) { tLED_DESCRIPTOR *led_descriptor = &LED_DESCRIPTOR[0] - 1; volatile avr32_pwm_channel_t *led_pwm_channel; volatile avr32_gpio_port_t *led_gpio_port; U8 led_shift; // For each specified LED... for (leds &= (1 << LED_COUNT) - 1; leds; leds >>= led_shift) { // Select the next specified LED and check that it has a PWM channel. led_shift = 1 + ctz(leds); led_descriptor += led_shift; if (led_descriptor->PWM.CHANNEL < 0) continue; // Initialize or update the LED PWM channel. 
led_pwm_channel = &AVR32_PWM.channel[led_descriptor->PWM.CHANNEL]; if (!(AVR32_PWM.sr & (1 << led_descriptor->PWM.CHANNEL))) { led_pwm_channel->cmr = (AVR32_PWM_CPRE_MCK << AVR32_PWM_CPRE_OFFSET) & ~(AVR32_PWM_CALG_MASK | AVR32_PWM_CPOL_MASK | AVR32_PWM_CPD_MASK); led_pwm_channel->cprd = 0x000000FF; led_pwm_channel->cdty = intensity; AVR32_PWM.ena = 1 << led_descriptor->PWM.CHANNEL; } else { AVR32_PWM.isr; while (!(AVR32_PWM.isr & (1 << led_descriptor->PWM.CHANNEL))); led_pwm_channel->cupd = intensity; } // Switch the LED pin to its PWM function. led_gpio_port = &AVR32_GPIO.port[led_descriptor->GPIO.PORT]; if (led_descriptor->PWM.FUNCTION & 0x1) { led_gpio_port->pmr0s = led_descriptor->GPIO.PIN_MASK; } else { led_gpio_port->pmr0c = led_descriptor->GPIO.PIN_MASK; } if (led_descriptor->PWM.FUNCTION & 0x2) { led_gpio_port->pmr1s = led_descriptor->GPIO.PIN_MASK; } else { led_gpio_port->pmr1c = led_descriptor->GPIO.PIN_MASK; } led_gpio_port->gperc = led_descriptor->GPIO.PIN_MASK; } }
gpl-2.0
iamroot-x86-10/linux-3.10.4
fs/ext3/super.c
1192
85621
/* * linux/fs/ext3/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/exportfs.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/log2.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #define CREATE_TRACE_POINTS #include "ext3.h" #include "xattr.h" #include "acl.h" #include "namei.h" #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA #else #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA #endif static int ext3_load_journal(struct super_block *, struct ext3_super_block *, unsigned long journal_devnum); static int ext3_create_journal(struct super_block *, struct ext3_super_block *, unsigned int); static int ext3_commit_super(struct super_block *sb, struct ext3_super_block *es, int sync); static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es); static void ext3_clear_journal_err(struct super_block * sb, struct ext3_super_block * es); static int ext3_sync_fs(struct super_block *sb, int wait); static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]); static int ext3_remount (struct super_block * sb, int * flags, char * data); static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext3_unfreeze(struct super_block *sb); static int ext3_freeze(struct super_block *sb); /* * Wrappers for journal_start/end. 
*/ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); /* Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ journal = EXT3_SB(sb)->s_journal; if (is_journal_aborted(journal)) { ext3_abort(sb, __func__, "Detected aborted journal"); return ERR_PTR(-EROFS); } return journal_start(journal, nblocks); } int __ext3_journal_stop(const char *where, handle_t *handle) { struct super_block *sb; int err; int rc; sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = journal_stop(handle); if (!err) err = rc; if (err) __ext3_std_error(sb, where, err); return err; } void ext3_journal_abort_handle(const char *caller, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) { char nbuf[16]; const char *errstr = ext3_decode_error(NULL, err, nbuf); if (bh) BUFFER_TRACE(bh, "abort"); if (!handle->h_err) handle->h_err = err; if (is_handle_aborted(handle)) return; printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n", caller, errstr, err_fn); journal_abort_handle(handle); } void ext3_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext3, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. 
* * We'll just use the journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext3_handle_error(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; es->s_state |= cpu_to_le16(EXT3_ERROR_FS); if (sb->s_flags & MS_RDONLY) return; if (!test_opt (sb, ERRORS_CONT)) { journal_t *journal = EXT3_SB(sb)->s_journal; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (journal) journal_abort(journal, -EIO); } if (test_opt (sb, ERRORS_RO)) { ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } ext3_commit_super(sb, es, 1); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs (%s): panic forced after error\n", sb->s_id); } void ext3_error(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); ext3_handle_error(sb); } static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext3_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. 
*/ void __ext3_std_error (struct super_block * sb, const char * function, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; errstr = ext3_decode_error(sb, errno, nbuf); ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr); ext3_handle_error(sb); } /* * ext3_abort is a much stronger failure handler than ext3_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. * * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void ext3_abort(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs: panic from previous error\n"); if (sb->s_flags & MS_RDONLY) return; ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; sb->s_flags |= MS_RDONLY; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (EXT3_SB(sb)->s_journal) journal_abort(EXT3_SB(sb)->s_journal, -EIO); } void ext3_warning(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } void ext3_update_dynamic_rev(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV) return; ext3_msg(sb, KERN_WARNING, "warning: updating to rev %d because of " "new feature flag, running e2fsck is recommended", EXT3_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. */ } /* * Open the external journal device */ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static void ext3_blkdev_put(struct block_device *bdev) { blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static void ext3_blkdev_remove(struct ext3_sb_info *sbi) { struct block_device *bdev; bdev = sbi->journal_bdev; if (bdev) { ext3_blkdev_put(bdev); sbi->journal_bdev = NULL; } } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) { struct list_head *l; ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d", 
le32_to_cpu(sbi->s_es->s_last_orphan)); ext3_msg(sb, KERN_ERR, "sb_info orphan list:"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); ext3_msg(sb, KERN_ERR, " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext3_put_super (struct super_block * sb) { struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; int i, err; dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); ext3_xattr_put_super(sb); err = journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext3_abort(sb, __func__, "Couldn't clean up the journal"); if (!(sb->s_flags & MS_RDONLY)) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); BUFFER_TRACE(sbi->s_sbh, "marking dirty"); mark_buffer_dirty(sbi->s_sbh); ext3_commit_super(sb, es, 1); } for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. 
*/ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext3_blkdev_remove(sbi); } sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache *ext3_inode_cachep; /* * Called inside transaction, so use GFP_NOFS */ static struct inode *ext3_alloc_inode(struct super_block *sb) { struct ext3_inode_info *ei; ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->i_block_alloc_info = NULL; ei->vfs_inode.i_version = 1; atomic_set(&ei->i_datasync_tid, 0); atomic_set(&ei->i_sync_tid, 0); return &ei->vfs_inode; } static int ext3_drop_inode(struct inode *inode) { int drop = generic_drop_inode(inode); trace_ext3_drop_inode(inode, drop); return drop; } static void ext3_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } static void ext3_destroy_inode(struct inode *inode) { if (!list_empty(&(EXT3_I(inode)->i_orphan))) { printk("EXT3 Inode %p: orphan list check failed!\n", EXT3_I(inode)); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, EXT3_I(inode), sizeof(struct ext3_inode_info), false); dump_stack(); } call_rcu(&inode->i_rcu, ext3_i_callback); } static void init_once(void *foo) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", sizeof(struct ext3_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext3_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
	 */
	rcu_barrier();
	kmem_cache_destroy(ext3_inode_cachep);
}

/* Append the journaled-quota mount options to /proc/mounts output. */
static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
{
#if defined(CONFIG_QUOTA)
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (sbi->s_jquota_fmt) {
		char *fmtname = "";

		switch (sbi->s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (sbi->s_qf_names[USRQUOTA])
		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);

	if (sbi->s_qf_names[GRPQUOTA])
		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);

	if (test_opt(sb, USRQUOTA))
		seq_puts(seq, ",usrquota");

	if (test_opt(sb, GRPQUOTA))
		seq_puts(seq, ",grpquota");
#endif
}

/* Map an EXT3_MOUNT_*_DATA journalling-mode flag to its option string. */
static char *data_mode_string(unsigned long mode)
{
	switch (mode) {
	case EXT3_MOUNT_JOURNAL_DATA:
		return "journal";
	case EXT3_MOUNT_ORDERED_DATA:
		return "ordered";
	case EXT3_MOUNT_WRITEBACK_DATA:
		return "writeback";
	}
	return "unknown";
}

/*
 * Show an option if
 *  - it's set to a non-default value OR
 *  - if the per-sb default is different from the global default
 */
static int ext3_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	struct ext3_super_block *es = sbi->s_es;
	unsigned long def_mount_opts;

	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);

	if (sbi->s_sb_block != 1)
		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
	if (test_opt(sb, MINIX_DF))
		seq_puts(seq, ",minixdf");
	if (test_opt(sb, GRPID))
		seq_puts(seq, ",grpid");
	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS))
		seq_puts(seq, ",nogrpid");
	if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT3_DEF_RESUID)) ||
	    le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) {
		seq_printf(seq, ",resuid=%u",
				from_kuid_munged(&init_user_ns, sbi->s_resuid));
	}
	if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT3_DEF_RESGID)) ||
	    le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) {
		seq_printf(seq, ",resgid=%u",
				from_kgid_munged(&init_user_ns, sbi->s_resgid));
	}
	if (test_opt(sb, ERRORS_RO)) {
		int def_errors = le16_to_cpu(es->s_errors);

		/* errors=remount-ro is shown only when it differs from the
		 * on-disk default. */
		if (def_errors == EXT3_ERRORS_PANIC ||
		    def_errors == EXT3_ERRORS_CONTINUE) {
			seq_puts(seq, ",errors=remount-ro");
		}
	}
	if (test_opt(sb, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (test_opt(sb, NO_UID32))
		seq_puts(seq, ",nouid32");
	if (test_opt(sb, DEBUG))
		seq_puts(seq, ",debug");
#ifdef CONFIG_EXT3_FS_XATTR
	if (test_opt(sb, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	if (!test_opt(sb, XATTR_USER) &&
	    (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
		seq_puts(seq, ",nouser_xattr");
	}
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	if (test_opt(sb, POSIX_ACL))
		seq_puts(seq, ",acl");
	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL))
		seq_puts(seq, ",noacl");
#endif
	if (!test_opt(sb, RESERVATION))
		seq_puts(seq, ",noreservation");
	if (sbi->s_commit_interval) {
		seq_printf(seq, ",commit=%u",
			   (unsigned) (sbi->s_commit_interval / HZ));
	}

	/*
	 * Always display barrier state so it's clear what the status is.
	 */
	seq_puts(seq, ",barrier=");
	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
	if (test_opt(sb, DATA_ERR_ABORT))
		seq_puts(seq, ",data_err=abort");

	if (test_opt(sb, NOLOAD))
		seq_puts(seq, ",norecovery");

	ext3_show_quota_options(seq, sb);
	return 0;
}


/*
 * NFS export helper: look up an inode by number + generation, rejecting
 * reserved, out-of-range, or stale (generation mismatch) inodes.
 */
static struct inode *ext3_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext3_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext3_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

/* Decode an NFS file handle into a dentry via the generic exportfs helper. */
static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext3_nfs_get_inode);
}

/* Decode the parent directory embedded in an NFS file handle. */
static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext3_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device.  Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT3_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return journal_try_to_free_buffers(journal, page,
						   wait & ~__GFP_WAIT);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_QUOTA
/* Map a quota type to its human-readable name / mount-option token. */
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))

/* Forward declarations for the quota operation tables below. */
static int ext3_write_dquot(struct dquot *dquot);
static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
static int ext3_mark_dquot_dirty(struct dquot *dquot);
static int ext3_write_info(struct super_block *sb, int type);
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path);
static int ext3_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext3_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);

/* Journalled quota operations; generic dquot alloc/destroy are reused. */
static const struct dquot_operations ext3_quota_operations = {
	.write_dquot	= ext3_write_dquot,
	.acquire_dquot	= ext3_acquire_dquot,
	.release_dquot	= ext3_release_dquot,
	.mark_dirty	= ext3_mark_dquot_dirty,
	.write_info	= ext3_write_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};

/* quotactl(2) entry points; only quota_on needs an ext3-specific hook. */
static const struct quotactl_ops ext3_qctl_operations = {
	.quota_on	= ext3_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
#endif

/* Superblock operations wired into the VFS for every ext3 mount. */
static const struct super_operations ext3_sops = {
	.alloc_inode	= ext3_alloc_inode,
	.destroy_inode	= ext3_destroy_inode,
	.write_inode	= ext3_write_inode,
	.dirty_inode	= ext3_dirty_inode,
	.drop_inode	= ext3_drop_inode,
	.evict_inode	= ext3_evict_inode,
	.put_super	= ext3_put_super,
	.sync_fs	= ext3_sync_fs,
	.freeze_fs	= ext3_freeze,
	.unfreeze_fs	= ext3_unfreeze,
	.statfs		= ext3_statfs,
	.remount_fs	= ext3_remount,
	.show_options	= ext3_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext3_quota_read,
	.quota_write	= ext3_quota_write,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

/* NFS export operations (file-handle decoding). */
static const struct export_operations ext3_export_ops = {
	.fh_to_dentry = ext3_fh_to_dentry,
	.fh_to_parent = ext3_fh_to_parent,
	.get_parent = ext3_get_parent,
};

/* Token identifiers for every recognised mount option (see tokens[]). */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_resize, Opt_usrquota, Opt_grpquota
};

/* Option-string patterns consumed by match_token() in parse_options(). */
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_nocheck, "nocheck"},
	{Opt_nocheck, "check=none"},
	{Opt_debug, "debug"},
	{Opt_oldalloc, "oldalloc"},
	{Opt_orlov, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_reservation, "reservation"},
	{Opt_noreservation, "noreservation"},
	{Opt_noload, "noload"},
	{Opt_noload, "norecovery"},
	{Opt_nobh, "nobh"},
	{Opt_bh, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_journal_update, "journal=update"},
	{Opt_journal_inum, "journal=%u"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_resize, "resize"},
	{Opt_err, NULL},
};

/*
 * Parse a leading "sb=N" out of the option string, advancing *data past it.
 * Returns the requested superblock block number, or 1 (the default
 * location) when absent or malformed.
 */
static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
{
	ext3_fsblk_t	sb_block;
	char		*options = (char *) *data;

	if (!options || strncmp(options, "sb=", 3) != 0)
		return 1;	/* Default location */
	options += 3;
	/*todo: use simple_strtoll with >32bit ext3 */
	sb_block = simple_strtoul(options, &options, 0);
	if (*options && *options != ',') {
		ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s",
		       (char *) *data);
		return 1;
	}
	if (*options == ',')
		options++;
	*data = (void *) options;
	return sb_block;
}

#ifdef CONFIG_QUOTA
/*
 * Record the journaled-quota file name for @qtype from a mount option.
 * Returns 1 on success, 0 on error (change while quota on, OOM, duplicate
 * conflicting name, or a name containing '/').
 */
static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	char *qname;

	if (sb_any_quota_loaded(sb) &&
		!sbi->s_qf_names[qtype]) {
		ext3_msg(sb, KERN_ERR,
			"Cannot change journaled "
			"quota options when quota turned on");
		return 0;
	}
	qname = match_strdup(args);
	if (!qname) {
		ext3_msg(sb, KERN_ERR,
			"Not enough memory for storing quotafile name");
		return 0;
	}
	if (sbi->s_qf_names[qtype]) {
		/* Re-specifying the same name is accepted; a different
		 * name for the same type is an error. */
		int same = !strcmp(sbi->s_qf_names[qtype], qname);

		kfree(qname);
		if (!same) {
			ext3_msg(sb, KERN_ERR,
				 "%s quota file already specified",
				 QTYPE2NAME(qtype));
		}
		return same;
	}
	if (strchr(qname, '/')) {
		ext3_msg(sb, KERN_ERR,
			"quotafile must be on filesystem root");
		kfree(qname);
		return 0;
	}
	sbi->s_qf_names[qtype] = qname;
	set_opt(sbi->s_mount_opt, QUOTA);
	return 1;
}

/*
 * Forget the journaled-quota file name for @qtype ("usrjquota=" /
 * "grpjquota=" with empty value).  Returns 1 on success, 0 if quota of
 * that type is currently loaded.
 */
static int clear_qf_name(struct super_block *sb, int qtype)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (sb_any_quota_loaded(sb) &&
		sbi->s_qf_names[qtype]) {
		ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options"
			" when quota turned on");
		return 0;
	}
	if (sbi->s_qf_names[qtype]) {
		kfree(sbi->s_qf_names[qtype]);
		sbi->s_qf_names[qtype] = NULL;
	}
	return 1;
}
#endif

/*
 * Parse the comma-separated mount option string, updating sbi->s_mount_opt
 * and the out-parameters (*inum, *journal_devnum, *n_blocks_count).
 * Returns 1 on success, 0 on any invalid or disallowed option.
 */
static int parse_options (char *options, struct super_block *sb,
			  unsigned int *inum, unsigned long *journal_devnum,
			  ext3_fsblk_t *n_blocks_count, int is_remount)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	char * p;
	substring_t args[MAX_OPT_ARGS];
	int data_opt = 0;
	int option;
	kuid_t uid;
	kgid_t gid;
#ifdef CONFIG_QUOTA
	int qfmt;
#endif

	if (!options)
		return 1;

	while ((p = strsep (&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take
		 * optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_bsd_df:
			clear_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_minix_df:
			set_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_grpid:
			set_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_nogrpid:
			clear_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_resuid:
			if (match_int(&args[0], &option))
				return 0;
			uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(uid)) {
				ext3_msg(sb, KERN_ERR, "Invalid uid value %d", option);
				return 0;
			}
			sbi->s_resuid = uid;
			break;
		case Opt_resgid:
			if (match_int(&args[0], &option))
				return 0;
			gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(gid)) {
				ext3_msg(sb, KERN_ERR, "Invalid gid value %d", option);
				return 0;
			}
			sbi->s_resgid = gid;
			break;
		case Opt_sb:
			/* handled by get_sb_block() instead of here */
			/* *sb_block = match_int(&args[0]); */
			break;
		/* The three errors= options are mutually exclusive: clear
		 * the other two before setting the requested one. */
		case Opt_err_panic:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_RO);
			break;
		case Opt_err_cont:
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_CONT);
			break;
		case Opt_nouid32:
			set_opt (sbi->s_mount_opt, NO_UID32);
			break;
		case Opt_nocheck:
			clear_opt (sbi->s_mount_opt, CHECK);
			break;
		case Opt_debug:
			set_opt (sbi->s_mount_opt, DEBUG);
			break;
		case Opt_oldalloc:
			ext3_msg(sb, KERN_WARNING,
				"Ignoring deprecated oldalloc option");
			break;
		case Opt_orlov:
			ext3_msg(sb, KERN_WARNING,
				"Ignoring deprecated orlov option");
			break;
#ifdef CONFIG_EXT3_FS_XATTR
		case Opt_user_xattr:
			set_opt (sbi->s_mount_opt, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt (sbi->s_mount_opt, XATTR_USER);
			break;
#else
		case Opt_user_xattr:
		case Opt_nouser_xattr:
			ext3_msg(sb, KERN_INFO,
				"(no)user_xattr options not supported");
			break;
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
#else
		case Opt_acl:
		case Opt_noacl:
			ext3_msg(sb, KERN_INFO, "(no)acl options not supported");
			break;
#endif
		case Opt_reservation:
			set_opt(sbi->s_mount_opt, RESERVATION);
			break;
		case Opt_noreservation:
			clear_opt(sbi->s_mount_opt, RESERVATION);
			break;
		case Opt_journal_update:
			/* @@@ FIXME */
			/* Eventually we will want to be able to create
			   a journal file here.  For now, only allow the
			   user to specify an existing inode to be the
			   journal file. */
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
					"journal on remount");
				return 0;
			}
			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
			break;
		case Opt_journal_inum:
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
				       "journal on remount");
				return 0;
			}
			if (match_int(&args[0], &option))
				return 0;
			*inum = option;
			break;
		case Opt_journal_dev:
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
				       "journal on remount");
				return 0;
			}
			if (match_int(&args[0], &option))
				return 0;
			*journal_devnum = option;
			break;
		case Opt_noload:
			set_opt (sbi->s_mount_opt, NOLOAD);
			break;
		case Opt_commit:
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0)
				return 0;
			/* commit=0 means "use the JBD default interval" */
			if (option == 0)
				option = JBD_DEFAULT_MAX_COMMIT_AGE;
			sbi->s_commit_interval = HZ * option;
			break;
		case Opt_data_journal:
			data_opt = EXT3_MOUNT_JOURNAL_DATA;
			goto datacheck;
		case Opt_data_ordered:
			data_opt = EXT3_MOUNT_ORDERED_DATA;
			goto datacheck;
		case Opt_data_writeback:
			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
		datacheck:
			/* The data journalling mode cannot be changed on
			 * remount; only re-stating the current mode is OK. */
			if (is_remount) {
				if (test_opt(sb, DATA_FLAGS) == data_opt)
					break;
				ext3_msg(sb, KERN_ERR,
					"error: cannot change "
					"data mode on remount. The filesystem "
					"is mounted in data=%s mode and you "
					"try to remount it in data=%s mode.",
					data_mode_string(test_opt(sb,
							DATA_FLAGS)),
					data_mode_string(data_opt));
				return 0;
			} else {
				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
				sbi->s_mount_opt |= data_opt;
			}
			break;
		case Opt_data_err_abort:
			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
		case Opt_data_err_ignore:
			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
#ifdef CONFIG_QUOTA
		case Opt_usrjquota:
			if (!set_qf_name(sb, USRQUOTA, &args[0]))
				return 0;
			break;
		case Opt_grpjquota:
			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
				return 0;
			break;
		case Opt_offusrjquota:
			if (!clear_qf_name(sb, USRQUOTA))
				return 0;
			break;
		case Opt_offgrpjquota:
			if (!clear_qf_name(sb, GRPQUOTA))
				return 0;
			break;
		case Opt_jqfmt_vfsold:
			qfmt = QFMT_VFS_OLD;
			goto set_qf_format;
		case Opt_jqfmt_vfsv0:
			qfmt = QFMT_VFS_V0;
			goto set_qf_format;
		case Opt_jqfmt_vfsv1:
			qfmt = QFMT_VFS_V1;
set_qf_format:
			if (sb_any_quota_loaded(sb) &&
			    sbi->s_jquota_fmt != qfmt) {
				ext3_msg(sb, KERN_ERR, "error: cannot change "
					"journaled quota options when "
					"quota turned on.");
				return 0;
			}
			sbi->s_jquota_fmt = qfmt;
			break;
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
		case Opt_noquota:
			if (sb_any_quota_loaded(sb)) {
				ext3_msg(sb, KERN_ERR, "error: cannot change "
					"quota options when quota turned on.");
				return 0;
			}
			clear_opt(sbi->s_mount_opt, QUOTA);
			clear_opt(sbi->s_mount_opt, USRQUOTA);
			clear_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
			ext3_msg(sb, KERN_ERR,
				"error: quota options not supported.");
			break;
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
			ext3_msg(sb, KERN_ERR,
				"error: journaled quota options not "
				"supported.");
			break;
		case Opt_noquota:
			break;
#endif
		case Opt_abort:
			set_opt(sbi->s_mount_opt, ABORT);
			break;
		case Opt_nobarrier:
			clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_barrier:
			if (args[0].from) {
				if (match_int(&args[0], &option))
					return 0;
			} else
				option = 1;	/* No argument, default to 1 */
			if (option)
				set_opt(sbi->s_mount_opt, BARRIER);
			else
				clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_ignore:
			break;
		case Opt_resize:
			if (!is_remount) {
				ext3_msg(sb, KERN_ERR,
					"error: resize option only available "
					"for remount");
				return 0;
			}
			if (match_int(&args[0], &option) != 0)
				return 0;
			*n_blocks_count = option;
			break;
		case Opt_nobh:
			ext3_msg(sb, KERN_WARNING,
				"warning: ignoring deprecated nobh option");
			break;
		case Opt_bh:
			ext3_msg(sb, KERN_WARNING,
				"warning: ignoring deprecated bh option");
			break;
		default:
			ext3_msg(sb, KERN_ERR,
				"error: unrecognized mount option \"%s\" "
				"or missing value", p);
			return 0;
		}
	}
#ifdef CONFIG_QUOTA
	/* Cross-validate journaled-quota options: a journaled quota file
	 * for a type must not be mixed with the old-style quota option for
	 * the same type, and a jqfmt= must accompany any quota file. */
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi->s_mount_opt, USRQUOTA);
		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi->s_mount_opt, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext3_msg(sb, KERN_ERR, "error: old and new quota "
					"format mixing.");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
					"not specified.");
			return 0;
		}
	} else {
		if (sbi->s_jquota_fmt) {
			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
					"specified with no journaling "
					"enabled.");
			return 0;
		}
	}
#endif
	return 1;
}

/*
 * Finish superblock setup at mount time: sanity-warn about unchecked or
 * erroneous filesystems, bump the mount count, set the RECOVER flag and
 * commit the superblock.  Returns 0 or MS_RDONLY (revision too high).
 */
static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
			    int read_only)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
		ext3_msg(sb, KERN_ERR,
			"error: revision level too high, "
			"forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		return res;
	if (!(sbi->s_mount_state & EXT3_VALID_FS))
		ext3_msg(sb, KERN_WARNING,
			"warning: mounting unchecked fs, "
			"running e2fsck is recommended");
	else if ((sbi->s_mount_state & EXT3_ERROR_FS))
		ext3_msg(sb, KERN_WARNING,
			"warning: mounting fs with errors, "
			"running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
			le16_to_cpu(es->s_max_mnt_count))
		ext3_msg(sb, KERN_WARNING,
			"warning: maximal mount count reached, "
			"running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext3_msg(sb, KERN_WARNING,
			"warning: checktime reached, "
			"running e2fsck is recommended");
#if 0
		/* @@@ We _will_ want to clear the valid bit if we find
		   inconsistencies, to force a fsck at reboot.  But for
		   a plain journaled filesystem we can keep it set as
		   valid forever! :) */
	es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
#endif
	if (!le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext3_update_dynamic_rev(sb);
	/* RECOVER stays set while mounted r/w; it is cleared again on a
	 * clean unmount in ext3_put_super(). */
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);

	ext3_commit_super(sb, es, 1);
	if (test_opt(sb, DEBUG))
		ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
				"bpg=%lu, ipg=%lu, mo=%04lx]",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT3_BLOCKS_PER_GROUP(sb),
			EXT3_INODES_PER_GROUP(sb),
			sbi->s_mount_opt);

	if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
		char b[BDEVNAME_SIZE];
		ext3_msg(sb, KERN_INFO, "using external journal on %s",
			bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
	} else {
		ext3_msg(sb, KERN_INFO, "using internal journal");
	}
	cleancache_init_fs(sb);
	return res;
}

/* Called at mount-time, super-block is locked */
static int ext3_check_descriptors(struct super_block *sb)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	int i;

	ext3_debug ("Checking group descriptors");

	/* Verify that each group's bitmaps and inode table lie entirely
	 * within that group's block range; returns 0 on any violation. */
	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
		ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
		ext3_fsblk_t last_block;

		if (i == sbi->s_groups_count - 1)
			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
		else
			last_block = first_block +
				(EXT3_BLOCKS_PER_GROUP(sb) - 1);

		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
		{
			ext3_error (sb, "ext3_check_descriptors",
				    "Block bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_block_bitmap));
			return 0;
		}
		if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
		{
			ext3_error (sb, "ext3_check_descriptors",
				    "Inode bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_inode_bitmap));
			return 0;
		}
		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
		    last_block)
		{
			ext3_error (sb, "ext3_check_descriptors",
				    "Inode table for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_inode_table));
			return 0;
		}
	}

	sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
	return 1;
}


/* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.
 * The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext3_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext3_orphan_cleanup (struct super_block * sb,
				 struct ext3_super_block * es)
{
	unsigned int s_flags = sb->s_flags;
	int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext3_msg(sb, KERN_ERR, "error: write access "
			"unavailable, skipping orphan cleanup.");
		return;
	}

	/* Check if feature set allows readwrite operations */
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
		ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
			jbd_debug(1, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & MS_RDONLY) {
		/* Temporarily flip to read-write; the original flags are
		 * restored at the end of this function. */
		ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~MS_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= MS_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (EXT3_SB(sb)->s_qf_names[i]) {
			int ret = ext3_quota_on_mount(sb, i);
			if (ret < 0)
				ext3_msg(sb, KERN_ERR,
					"error: cannot turn on journaled "
					"quota: %d", ret);
		}
	}
#endif

	/* Walk the on-disk orphan chain; inodes with a remaining link count
	 * are truncated, unlinked ones are deleted by the final iput(). */
	while (es->s_last_orphan) {
		struct inode *inode;

		inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			printk(KERN_DEBUG
				"%s: truncating inode %lu to %Ld bytes\n",
				__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
				  inode->i_ino, inode->i_size);
			ext3_truncate(inode);
			nr_truncates++;
		} else {
			printk(KERN_DEBUG
				"%s: deleting unreferenced inode %lu\n",
				__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x)==1) ? "" : "s"

	if (nr_orphans)
		ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sb_dqopt(sb)->files[i])
			dquot_quota_off(sb, i);
	}
#endif
	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
}

/*
 * Maximal file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^32 sector limit.
 */
static loff_t ext3_max_size(int bits)
{
	loff_t res = EXT3_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;

	/* This is calculated to be the largest file size for a
	 * dense file such that the total number of
	 * sectors in the file, including data and all indirect blocks,
	 * does not exceed 2^32 -1
	 * __u32 i_blocks representing the total number of
	 * 512 bytes blocks of the file
	 */
	upper_limit = (1LL << 32) - 1;

	/* total blocks in file system block size */
	upper_limit >>= (bits - 9);


	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* triple indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	/* block-mapping limit: direct + {single,double,triple} indirect */
	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

/*
 * Return the block number holding group descriptor block @nr.  Without
 * META_BG the descriptors follow the superblock; with META_BG the location
 * depends on whether the meta block group carries a superblock backup.
 */
static ext3_fsblk_t descriptor_loc(struct super_block *sb,
				    ext3_fsblk_t logic_sb_block,
				    int nr)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	unsigned long bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
	    nr < first_meta_bg)
		return (logic_sb_block + nr + 1);
	bg = sbi->s_desc_per_block * nr;
	if (ext3_bg_has_super(sb, bg))
		has_super = 1;
	return (has_super + ext3_group_first_block_no(sb, bg));
}


/*
 * Mount-time entry point: read and validate the on-disk superblock,
 * apply default and user mount options, and initialise the in-core
 * ext3_sb_info.  Returns 0 on success or a negative errno.
 */
static int ext3_fill_super (struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext3_super_block *es = NULL;
	struct ext3_sb_info *sbi;
	ext3_fsblk_t block;
	ext3_fsblk_t sb_block = get_sb_block(&data, sb);
	ext3_fsblk_t logic_sb_block;
	unsigned long offset = 0;
	unsigned int journal_inum = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	int blocksize;
	int hblock;
	int db_count;
	int i;
	int needs_recovery;
	int ret = -EINVAL;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext3 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT3_MIN_BLOCK_SIZE) {
		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext3_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext3 macro-instructions depend on its value
	 */
	es = (struct ext3_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT3_SUPER_MAGIC)
		goto cantfind_ext3;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT3_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT3_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT3_FS_XATTR
	if (def_mount_opts & EXT3_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	if (def_mount_opts & EXT3_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
		set_opt(sbi->s_mount_opt, ORDERED_DATA);
	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);

	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));

	/* enable barriers by default */
	set_opt(sbi->s_mount_opt, BARRIER);
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
			    NULL, 0))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext3_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount because of unsupported "
			"optional features (%x)", le32_to_cpu(features));
		goto failed_mount;
	}
	features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
	if (!(sb->s_flags & MS_RDONLY) && features) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount RDWR because of unsupported "
			"optional features (%x)", le32_to_cpu(features));
		goto failed_mount;
	}
	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	if (blocksize < EXT3_MIN_BLOCK_SIZE ||
	    blocksize > EXT3_MAX_BLOCK_SIZE) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount because of unsupported "
			"filesystem blocksize %d", blocksize);
		goto failed_mount;
	}

	hblock = bdev_logical_block_size(sb->s_bdev);
	if (sb->s_blocksize != blocksize) {
		/*
		 * Make sure the blocksize for the filesystem is larger
		 * than the hardware sectorsize for the machine.
		 */
		if (blocksize < hblock) {
			ext3_msg(sb, KERN_ERR,
				"error: fsblocksize %d too small for "
				"hardware sectorsize %d", blocksize, hblock);
			goto failed_mount;
		}

		/* Re-read the superblock with the real fs blocksize. */
		brelse (bh);
		if (!sb_set_blocksize(sb, blocksize)) {
			ext3_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto out_fail;
		}
		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if (!bh) {
			ext3_msg(sb, KERN_ERR,
			       "error: can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext3_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
			ext3_msg(sb, KERN_ERR,
				"error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits);

	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext3_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}
	sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (blocksize != sbi->s_frag_size) {
		ext3_msg(sb, KERN_ERR,
		       "error: fragsize %lu != blocksize %u (unsupported)",
		       sbi->s_frag_size, blocksize);
		goto failed_mount;
	}
	sbi->s_frags_per_block = 1;
	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
	if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0)
		goto cantfind_ext3;
	sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext3;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb));
	for (i=0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	i = le32_to_cpu(es->s_flags);
	if (i & EXT2_FLAGS_UNSIGNED_HASH)
		sbi->s_hash_unsigned = 3;
	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
		/* Neither hash-signedness flag set: pin the behaviour of
		 * this machine's char signedness into the superblock. */
#ifdef __CHAR_UNSIGNED__
		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
		sbi->s_hash_unsigned = 3;
#else
		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
	}

	/* Per-group counts must fit in one bitmap block (blocksize * 8 bits). */
	if (sbi->s_blocks_per_group > blocksize * 8) {
		ext3_msg(sb, KERN_ERR,
			"#blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > blocksize * 8) {
		ext3_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > blocksize * 8)
{ ext3_msg(sb, KERN_ERR, "error: #inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } err = generic_check_addressable(sb->s_blocksize_bits, le32_to_cpu(es->s_blocks_count)); if (err) { ext3_msg(sb, KERN_ERR, "error: filesystem is too large to mount safely"); if (sizeof(sector_t) < 8) ext3_msg(sb, KERN_ERR, "error: CONFIG_LBDAF not enabled"); ret = err; goto failed_mount; } if (EXT3_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext3; sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - le32_to_cpu(es->s_first_data_block) - 1) / EXT3_BLOCKS_PER_GROUP(sb)) + 1; db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb)); sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext3_msg(sb, KERN_ERR, "error: not enough memory"); ret = -ENOMEM; goto failed_mount; } bgl_lock_init(sbi->s_blockgroup_lock); for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logic_sb_block, i); sbi->s_group_desc[i] = sb_bread(sb, block); if (!sbi->s_group_desc[i]) { ext3_msg(sb, KERN_ERR, "error: can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext3_check_descriptors (sb)) { ext3_msg(sb, KERN_ERR, "error: group descriptors corrupted"); goto failed_mount2; } sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); /* per fileystem reservation list head & lock */ spin_lock_init(&sbi->s_rsv_window_lock); sbi->s_rsv_window_root = RB_ROOT; /* Add a single, static dummy reservation to the start of the * reservation window list --- it gives us a placeholder for * append-at-start-of-list which makes the allocation logic * _much_ simpler. 
*/ sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_alloc_hit = 0; sbi->s_rsv_window_head.rsv_goal_size = 0; ext3_rsv_window_add(sb, &sbi->s_rsv_window_head); /* * set up enough so that it can read an inode */ sb->s_op = &ext3_sops; sb->s_export_op = &ext3_export_ops; sb->s_xattr = ext3_xattr_handlers; #ifdef CONFIG_QUOTA sb->s_qcop = &ext3_qctl_operations; sb->dq_op = &ext3_quota_operations; #endif memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); mutex_init(&sbi->s_resize_lock); sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)); /* * The first inode we look at is the journal inode. Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) { if (ext3_load_journal(sb, es, journal_devnum)) goto failed_mount2; } else if (journal_inum) { if (ext3_create_journal(sb, es, journal_inum)) goto failed_mount2; } else { if (!silent) ext3_msg(sb, KERN_ERR, "error: no journal found. " "mounting ext3 over ext2?"); goto failed_mount2; } err = percpu_counter_init(&sbi->s_freeblocks_counter, ext3_count_free_blocks(sb)); if (!err) { err = percpu_counter_init(&sbi->s_freeinodes_counter, ext3_count_free_inodes(sb)); } if (!err) { err = percpu_counter_init(&sbi->s_dirs_counter, ext3_count_dirs(sb)); } if (err) { ext3_msg(sb, KERN_ERR, "error: insufficient memory"); ret = err; goto failed_mount3; } /* We have now updated the journal if required, so we can * validate the data journaling mode. 
*/ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal capabilities: ORDERED_DATA if the journal can cope, else JOURNAL_DATA */ if (journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE); else set_opt(sbi->s_mount_opt, JOURNAL_DATA); break; case EXT3_MOUNT_ORDERED_DATA: case EXT3_MOUNT_WRITEBACK_DATA: if (!journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) { ext3_msg(sb, KERN_ERR, "error: journal does not support " "requested data journaling mode"); goto failed_mount3; } default: break; } /* * The journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. */ root = ext3_iget(sb, EXT3_ROOT_INO); if (IS_ERR(root)) { ext3_msg(sb, KERN_ERR, "error: get root inode failed"); ret = PTR_ERR(root); goto failed_mount3; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { iput(root); ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck"); goto failed_mount3; } sb->s_root = d_make_root(root); if (!sb->s_root) { ext3_msg(sb, KERN_ERR, "error: get root dentry failed"); ret = -ENOMEM; goto failed_mount3; } if (ext3_setup_super(sb, es, sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; ext3_orphan_cleanup(sb, es); EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; if (needs_recovery) { ext3_mark_recovery_complete(sb, es); ext3_msg(sb, KERN_INFO, "recovery complete"); } ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode", test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal": test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? 
"ordered": "writeback"); return 0; cantfind_ext3: if (!silent) ext3_msg(sb, KERN_INFO, "error: can't find ext3 filesystem on dev %s.", sb->s_id); goto failed_mount; failed_mount3: percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); journal_destroy(sbi->s_journal); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); failed_mount: #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif ext3_blkdev_remove(sbi); brelse(bh); out_fail: sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); return ret; } /* * Setup any per-fs journal parameters now. We'll do this both on * initial mount, once the journal has been initialised but before we've * done any recovery; and again on any subsequent remount. */ static void ext3_init_journal_params(struct super_block *sb, journal_t *journal) { struct ext3_sb_info *sbi = EXT3_SB(sb); if (sbi->s_commit_interval) journal->j_commit_interval = sbi->s_commit_interval; /* We could also set up an ext3-specific default for the commit * interval here, but for now we'll just fall back to the jbd * default. */ spin_lock(&journal->j_state_lock); if (test_opt(sb, BARRIER)) journal->j_flags |= JFS_BARRIER; else journal->j_flags &= ~JFS_BARRIER; if (test_opt(sb, DATA_ERR_ABORT)) journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR; else journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR; spin_unlock(&journal->j_state_lock); } static journal_t *ext3_get_journal(struct super_block *sb, unsigned int journal_inum) { struct inode *journal_inode; journal_t *journal; /* First, test for the existence of a valid inode on disk. Bad * things happen if we iget() an unused inode, as the subsequent * iput() will try to delete it. 
*/ journal_inode = ext3_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext3_msg(sb, KERN_ERR, "error: no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext3_msg(sb, KERN_ERR, "error: journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext3_msg(sb, KERN_ERR, "error: invalid journal inode"); iput(journal_inode); return NULL; } journal = journal_init_inode(journal_inode); if (!journal) { ext3_msg(sb, KERN_ERR, "error: could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext3_init_journal_params(sb, journal); return journal; } static journal_t *ext3_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head * bh; journal_t *journal; ext3_fsblk_t start; ext3_fsblk_t len; int hblock, blocksize; ext3_fsblk_t sb_block; unsigned long offset; struct ext3_super_block * es; struct block_device *bdev; bdev = ext3_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext3_msg(sb, KERN_ERR, "error: blocksize too small for journal device"); goto out_bdev; } sb_block = EXT3_MIN_BLOCK_SIZE / blocksize; offset = EXT3_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext3_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext3_msg(sb, KERN_ERR, "error: external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext3_msg(sb, KERN_ERR, "error: journal UUID does not match"); brelse(bh); 
goto out_bdev; } len = le32_to_cpu(es->s_blocks_count); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext3_msg(sb, KERN_ERR, "error: failed to create device journal"); goto out_bdev; } journal->j_private = sb; if (!bh_uptodate_or_lock(journal->j_sb_buffer)) { if (bh_submit_read(journal->j_sb_buffer)) { ext3_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext3_msg(sb, KERN_ERR, "error: external journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT3_SB(sb)->journal_bdev = bdev; ext3_init_journal_params(sb, journal); return journal; out_journal: journal_destroy(journal); out_bdev: ext3_blkdev_put(bdev); return NULL; } static int ext3_load_journal(struct super_block *sb, struct ext3_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext3_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. 
*/ if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { ext3_msg(sb, KERN_INFO, "recovery required on readonly filesystem"); if (really_read_only) { ext3_msg(sb, KERN_ERR, "error: write access " "unavailable, cannot proceed"); return -EROFS; } ext3_msg(sb, KERN_INFO, "write access will be enabled during recovery"); } } if (journal_inum && journal_dev) { ext3_msg(sb, KERN_ERR, "error: filesystem has both journal " "and inode journals"); return -EINVAL; } if (journal_inum) { if (!(journal = ext3_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext3_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JFS_BARRIER)) printk(KERN_INFO "EXT3-fs: barriers not enabled\n"); if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { err = journal_update_format(journal); if (err) { ext3_msg(sb, KERN_ERR, "error updating journal"); journal_destroy(journal); return err; } } if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) err = journal_wipe(journal, !really_read_only); if (!err) err = journal_load(journal); if (err) { ext3_msg(sb, KERN_ERR, "error loading journal"); journal_destroy(journal); return err; } EXT3_SB(sb)->s_journal = journal; ext3_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. 
 */
		ext3_commit_super(sb, es, 1);
	}

	return 0;
}

/*
 * Create a fresh journal in inode 'journal_inum' on a read-write mount.
 * On success the superblock is updated (dynamic rev, HAS_JOURNAL/RECOVER
 * feature flags, journal inode number) and committed synchronously so the
 * new journal is recorded on disk before we proceed.
 */
static int ext3_create_journal(struct super_block *sb,
			       struct ext3_super_block *es,
			       unsigned int journal_inum)
{
	journal_t *journal;
	int err;

	/* Creating a journal writes metadata; refuse on a read-only mount. */
	if (sb->s_flags & MS_RDONLY) {
		ext3_msg(sb, KERN_ERR,
			"error: readonly filesystem when trying to "
			"create journal");
		return -EROFS;
	}

	journal = ext3_get_journal(sb, journal_inum);
	if (!journal)
		return -EINVAL;

	ext3_msg(sb, KERN_INFO, "creating new journal on inode %u",
	       journal_inum);

	err = journal_create(journal);
	if (err) {
		ext3_msg(sb, KERN_ERR, "error creating journal");
		/* Tear down the half-initialised journal on failure. */
		journal_destroy(journal);
		return -EIO;
	}

	EXT3_SB(sb)->s_journal = journal;

	/* Advertise the journal in the on-disk superblock feature flags. */
	ext3_update_dynamic_rev(sb);
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
	EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL);

	es->s_journal_inum = cpu_to_le32(journal_inum);

	/* Make sure we flush the recovery flag to disk. */
	ext3_commit_super(sb, es, 1);

	return 0;
}

/*
 * Write the in-memory superblock back to its buffer_head; if 'sync' is set,
 * wait for the write to complete.  Returns 0 or a negative error from the
 * synchronous write.
 */
static int ext3_commit_super(struct super_block *sb,
			       struct ext3_super_block *es,
			       int sync)
{
	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh)
		return error;

	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear. A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext3_msg(sb, KERN_ERR, "previous I/O error to "
			"superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.
This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. */ if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb)); es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb)); BUFFER_TRACE(sbh, "marking dirty"); mark_buffer_dirty(sbh); if (sync) { error = sync_dirty_buffer(sbh); if (buffer_write_io_error(sbh)) { ext3_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } } return error; } /* * Have we just finished recovery? If so, and if we are mounting (or * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es) { journal_t *journal = EXT3_SB(sb)->s_journal; journal_lock_updates(journal); if (journal_flush(journal) < 0) goto out; if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && sb->s_flags & MS_RDONLY) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, es, 1); } out: journal_unlock_updates(journal); } /* * If we are mounting (or read-write remounting) a filesystem whose journal * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
*/ static void ext3_clear_journal_err(struct super_block *sb, struct ext3_super_block *es) { journal_t *journal; int j_errno; const char *errstr; journal = EXT3_SB(sb)->s_journal; /* * Now check for any error status which may have been recorded in the * journal by a prior ext3_error() or ext3_abort() */ j_errno = journal_errno(journal); if (j_errno) { char nbuf[16]; errstr = ext3_decode_error(sb, j_errno, nbuf); ext3_warning(sb, __func__, "Filesystem error recorded " "from previous mount: %s", errstr); ext3_warning(sb, __func__, "Marking fs in need of " "filesystem check."); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; es->s_state |= cpu_to_le16(EXT3_ERROR_FS); ext3_commit_super (sb, es, 1); journal_clear_err(journal); } } /* * Force the running and committing transactions to commit, * and wait on the commit. */ int ext3_force_commit(struct super_block *sb) { journal_t *journal; int ret; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT3_SB(sb)->s_journal; ret = ext3_journal_force_commit(journal); return ret; } static int ext3_sync_fs(struct super_block *sb, int wait) { tid_t target; trace_ext3_sync_fs(sb, wait); /* * Writeback quota in non-journalled quota case - journalled quota has * no dirty dquots */ dquot_writeback_dquots(sb, -1); if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) { if (wait) log_wait_commit(EXT3_SB(sb)->s_journal, target); } return 0; } /* * LVM calls this function before a (read-only) snapshot is created. This * gives us a chance to flush the journal completely and mark the fs clean. */ static int ext3_freeze(struct super_block *sb) { int error = 0; journal_t *journal; if (!(sb->s_flags & MS_RDONLY)) { journal = EXT3_SB(sb)->s_journal; /* Now we set up the journal barrier. */ journal_lock_updates(journal); /* * We don't want to clear needs_recovery flag when we failed * to flush the journal. */ error = journal_flush(journal); if (error < 0) goto out; /* Journal blocked and flushed, clear needs_recovery flag. 
 */
		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
		error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
		if (error)
			goto out;
	}
	return 0;

out:
	/* Flush/commit failed: drop the journal barrier before returning. */
	journal_unlock_updates(journal);
	return error;
}

/*
 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
 * flag here, even though the filesystem is not technically dirty yet.
 */
static int ext3_unfreeze(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY)) {
		/* Reset the needs_recovery flag before the fs is unlocked. */
		EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
		/* Release the barrier taken by ext3_freeze(). */
		journal_unlock_updates(EXT3_SB(sb)->s_journal);
	}
	return 0;
}

/*
 * Remount handler: re-parses mount options, switching between ro/rw and
 * optionally growing the filesystem, restoring the previous options on any
 * failure.
 */
static int ext3_remount (struct super_block * sb, int * flags, char * data)
{
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	ext3_fsblk_t n_blocks_count = 0;
	unsigned long old_sb_flags;
	struct ext3_mount_options old_opts;
	int enable_quota = 0;
	int err;
#ifdef CONFIG_QUOTA
	int i;
#endif

	/* Store the original options so they can be restored on failure. */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;
	old_opts.s_commit_interval = sbi->s_commit_interval;
#ifdef CONFIG_QUOTA
	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++)
		if (sbi->s_qf_names[i]) {
			/* Deep-copy quota file names; free partial copies on OOM. */
			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
							 GFP_KERNEL);
			if (!old_opts.s_qf_names[i]) {
				int j;

				for (j = 0; j < i; j++)
					kfree(old_opts.s_qf_names[j]);
				return -ENOMEM;
			}
		} else
			old_opts.s_qf_names[i] = NULL;
#endif

	/*
	 * Allow the "check" option to be passed as a remount option.
	 */
	if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
		err = -EINVAL;
		goto restore_opts;
	}

	if (test_opt(sb, ABORT))
		ext3_abort(sb, __func__, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ?
MS_POSIXACL : 0); es = sbi->s_es; ext3_init_journal_params(sb, sbi->s_journal); if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || n_blocks_count > le32_to_cpu(es->s_blocks_count)) { if (test_opt(sb, ABORT)) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) && (sbi->s_mount_state & EXT3_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); ext3_mark_recovery_complete(sb, es); } else { __le32 ret; if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))) { ext3_msg(sb, KERN_WARNING, "warning: couldn't remount RDWR " "because of unsupported optional " "features (%x)", le32_to_cpu(ret)); err = -EROFS; goto restore_opts; } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount & mount for now. */ if (es->s_last_orphan) { ext3_msg(sb, KERN_WARNING, "warning: couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount & mount instead."); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) 
*/ ext3_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if ((err = ext3_group_extend(sb, es, n_blocks_count))) goto restore_opts; if (!ext3_setup_super (sb, es, 0)) sb->s_flags &= ~MS_RDONLY; enable_quota = 1; } } #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) kfree(old_opts.s_qf_names[i]); #endif if (enable_quota) dquot_resume(sb, -1); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif return err; } static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) { struct super_block *sb = dentry->d_sb; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; u64 fsid; if (test_opt(sb, MINIX_DF)) { sbi->s_overhead_last = 0; } else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) { unsigned long ngroups = sbi->s_groups_count, i; ext3_fsblk_t overhead = 0; smp_rmb(); /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. */ /* * All of the blocks before first_data_block are * overhead */ overhead = le32_to_cpu(es->s_first_data_block); /* * Add the overhead attributed to the superblock and * block group descriptors. If the sparse superblocks * feature is turned on, then not all groups have this. */ for (i = 0; i < ngroups; i++) { overhead += ext3_bg_has_super(sb, i) + ext3_bg_num_gdb(sb, i); cond_resched(); } /* * Every block group has an inode bitmap, a block * bitmap, and an inode table. 
*/ overhead += ngroups * (2 + sbi->s_itb_per_group); sbi->s_overhead_last = overhead; smp_wmb(); sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count); } buf->f_type = EXT3_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last; buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter); buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count); if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count)) buf->f_bavail = 0; buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT3_NAME_LEN; fsid = le64_to_cpup((void *)es->s_uuid) ^ le64_to_cpup((void *)es->s_uuid + sizeof(u64)); buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } /* Helper function for writing quotas on sync - we need to start transaction before quota file * is locked for write. Otherwise the are possible deadlocks: * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() * dquot_initialize() down(dqio_mutex) * down(dqio_mutex) journal_start() * */ #ifdef CONFIG_QUOTA static inline struct inode *dquot_to_inode(struct dquot *dquot) { return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; } static int ext3_write_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; struct inode *inode; inode = dquot_to_inode(dquot); handle = ext3_journal_start(inode, EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_acquire_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext3_journal_start(dquot_to_inode(dquot), EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_acquire(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int 
ext3_release_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext3_journal_start(dquot_to_inode(dquot), EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); return PTR_ERR(handle); } ret = dquot_release(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_mark_dquot_dirty(struct dquot *dquot) { /* Are we journaling quotas? */ if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] || EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) { dquot_mark_dquot_dirty(dquot); return ext3_write_dquot(dquot); } else { return dquot_mark_dquot_dirty(dquot); } } static int ext3_write_info(struct super_block *sb, int type) { int ret, err; handle_t *handle; /* Data block + inode block */ handle = ext3_journal_start(sb->s_root->d_inode, 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } /* * Turn on quotas during mount time - we need to find * the quota file and such... */ static int ext3_quota_on_mount(struct super_block *sb, int type) { return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type], EXT3_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int ext3_quota_on(struct super_block *sb, int type, int format_id, struct path *path) { int err; if (!test_opt(sb, QUOTA)) return -EINVAL; /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) return -EXDEV; /* Journaling quota? */ if (EXT3_SB(sb)->s_qf_names[type]) { /* Quotafile not of fs root? */ if (path->dentry->d_parent != sb->s_root) ext3_msg(sb, KERN_WARNING, "warning: Quota file not on filesystem root. " "Journaled quota will not work."); } /* * When we journal data on quota file, we have to flush journal to see * all updates to the file when we bypass pagecache... 
*/ if (ext3_should_journal_data(path->dentry->d_inode)) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... */ journal_lock_updates(EXT3_SB(sb)->s_journal); err = journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext3_bread(NULL, inode, blk, 0, &err); if (err) return err; if (!bh) /* A hole? 
*/ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL; struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (!handle) { ext3_msg(sb, KERN_WARNING, "warning: quota write (off=%llu, len=%llu)" " cancelled because transaction is not started.", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. */ if (sb->s_blocksize - offset < len) { ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } bh = ext3_bread(handle, inode, blk, 1, &err); if (!bh) goto out; if (journal_quota) { err = ext3_journal_get_write_access(handle, bh); if (err) { brelse(bh); goto out; } } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); if (journal_quota) err = ext3_journal_dirty_metadata(handle, bh); else { /* Always do at least ordered writes for quotas */ err = ext3_journal_dirty_data(handle, bh); mark_buffer_dirty(bh); } brelse(bh); out: if (err) return err; if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT3_I(inode)->i_disksize = inode->i_size; } inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext3_mark_inode_dirty(handle, inode); return len; } #endif static struct dentry *ext3_mount(struct file_system_type *fs_type, int flags, const 
char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super); } static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext3_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext3"); static int __init init_ext3_fs(void) { int err = init_ext3_xattr(); if (err) return err; err = init_inodecache(); if (err) goto out1; err = register_filesystem(&ext3_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: exit_ext3_xattr(); return err; } static void __exit exit_ext3_fs(void) { unregister_filesystem(&ext3_fs_type); destroy_inodecache(); exit_ext3_xattr(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions"); MODULE_LICENSE("GPL"); module_init(init_ext3_fs) module_exit(exit_ext3_fs)
gpl-2.0
Prometheus1408/android_kernel_cyanogen_msm8916
fs/ext3/super.c
1192
85621
/* * linux/fs/ext3/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/exportfs.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/log2.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #define CREATE_TRACE_POINTS #include "ext3.h" #include "xattr.h" #include "acl.h" #include "namei.h" #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA #else #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA #endif static int ext3_load_journal(struct super_block *, struct ext3_super_block *, unsigned long journal_devnum); static int ext3_create_journal(struct super_block *, struct ext3_super_block *, unsigned int); static int ext3_commit_super(struct super_block *sb, struct ext3_super_block *es, int sync); static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es); static void ext3_clear_journal_err(struct super_block * sb, struct ext3_super_block * es); static int ext3_sync_fs(struct super_block *sb, int wait); static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]); static int ext3_remount (struct super_block * sb, int * flags, char * data); static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext3_unfreeze(struct super_block *sb); static int ext3_freeze(struct super_block *sb); /* * Wrappers for journal_start/end. 
*/ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); /* Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ journal = EXT3_SB(sb)->s_journal; if (is_journal_aborted(journal)) { ext3_abort(sb, __func__, "Detected aborted journal"); return ERR_PTR(-EROFS); } return journal_start(journal, nblocks); } int __ext3_journal_stop(const char *where, handle_t *handle) { struct super_block *sb; int err; int rc; sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = journal_stop(handle); if (!err) err = rc; if (err) __ext3_std_error(sb, where, err); return err; } void ext3_journal_abort_handle(const char *caller, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) { char nbuf[16]; const char *errstr = ext3_decode_error(NULL, err, nbuf); if (bh) BUFFER_TRACE(bh, "abort"); if (!handle->h_err) handle->h_err = err; if (is_handle_aborted(handle)) return; printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n", caller, errstr, err_fn); journal_abort_handle(handle); } void ext3_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext3, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. 
* * We'll just use the journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext3_handle_error(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; es->s_state |= cpu_to_le16(EXT3_ERROR_FS); if (sb->s_flags & MS_RDONLY) return; if (!test_opt (sb, ERRORS_CONT)) { journal_t *journal = EXT3_SB(sb)->s_journal; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (journal) journal_abort(journal, -EIO); } if (test_opt (sb, ERRORS_RO)) { ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } ext3_commit_super(sb, es, 1); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs (%s): panic forced after error\n", sb->s_id); } void ext3_error(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); ext3_handle_error(sb); } static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext3_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. 
*/ void __ext3_std_error (struct super_block * sb, const char * function, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; errstr = ext3_decode_error(sb, errno, nbuf); ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr); ext3_handle_error(sb); } /* * ext3_abort is a much stronger failure handler than ext3_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. * * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void ext3_abort(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs: panic from previous error\n"); if (sb->s_flags & MS_RDONLY) return; ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; sb->s_flags |= MS_RDONLY; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (EXT3_SB(sb)->s_journal) journal_abort(EXT3_SB(sb)->s_journal, -EIO); } void ext3_warning(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } void ext3_update_dynamic_rev(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV) return; ext3_msg(sb, KERN_WARNING, "warning: updating to rev %d because of " "new feature flag, running e2fsck is recommended", EXT3_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. */ } /* * Open the external journal device */ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static void ext3_blkdev_put(struct block_device *bdev) { blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static void ext3_blkdev_remove(struct ext3_sb_info *sbi) { struct block_device *bdev; bdev = sbi->journal_bdev; if (bdev) { ext3_blkdev_put(bdev); sbi->journal_bdev = NULL; } } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) { struct list_head *l; ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d", 
le32_to_cpu(sbi->s_es->s_last_orphan)); ext3_msg(sb, KERN_ERR, "sb_info orphan list:"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); ext3_msg(sb, KERN_ERR, " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext3_put_super (struct super_block * sb) { struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; int i, err; dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); ext3_xattr_put_super(sb); err = journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext3_abort(sb, __func__, "Couldn't clean up the journal"); if (!(sb->s_flags & MS_RDONLY)) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); BUFFER_TRACE(sbi->s_sbh, "marking dirty"); mark_buffer_dirty(sbi->s_sbh); ext3_commit_super(sb, es, 1); } for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. 
*/ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext3_blkdev_remove(sbi); } sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache *ext3_inode_cachep; /* * Called inside transaction, so use GFP_NOFS */ static struct inode *ext3_alloc_inode(struct super_block *sb) { struct ext3_inode_info *ei; ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->i_block_alloc_info = NULL; ei->vfs_inode.i_version = 1; atomic_set(&ei->i_datasync_tid, 0); atomic_set(&ei->i_sync_tid, 0); return &ei->vfs_inode; } static int ext3_drop_inode(struct inode *inode) { int drop = generic_drop_inode(inode); trace_ext3_drop_inode(inode, drop); return drop; } static void ext3_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } static void ext3_destroy_inode(struct inode *inode) { if (!list_empty(&(EXT3_I(inode)->i_orphan))) { printk("EXT3 Inode %p: orphan list check failed!\n", EXT3_I(inode)); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, EXT3_I(inode), sizeof(struct ext3_inode_info), false); dump_stack(); } call_rcu(&inode->i_rcu, ext3_i_callback); } static void init_once(void *foo) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", sizeof(struct ext3_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext3_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(ext3_inode_cachep); } static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb) { #if defined(CONFIG_QUOTA) struct ext3_sb_info *sbi = EXT3_SB(sb); if (sbi->s_jquota_fmt) { char *fmtname = ""; switch (sbi->s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; case QFMT_VFS_V0: fmtname = "vfsv0"; break; case QFMT_VFS_V1: fmtname = "vfsv1"; break; } seq_printf(seq, ",jqfmt=%s", fmtname); } if (sbi->s_qf_names[USRQUOTA]) seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); if (test_opt(sb, USRQUOTA)) seq_puts(seq, ",usrquota"); if (test_opt(sb, GRPQUOTA)) seq_puts(seq, ",grpquota"); #endif } static char *data_mode_string(unsigned long mode) { switch (mode) { case EXT3_MOUNT_JOURNAL_DATA: return "journal"; case EXT3_MOUNT_ORDERED_DATA: return "ordered"; case EXT3_MOUNT_WRITEBACK_DATA: return "writeback"; } return "unknown"; } /* * Show an option if * - it's set to a non-default value OR * - if the per-sb default is different from the global default */ static int ext3_show_options(struct seq_file *seq, struct dentry *root) { struct super_block *sb = root->d_sb; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; unsigned long def_mount_opts; def_mount_opts = le32_to_cpu(es->s_default_mount_opts); if (sbi->s_sb_block != 1) seq_printf(seq, ",sb=%lu", sbi->s_sb_block); if (test_opt(sb, MINIX_DF)) seq_puts(seq, ",minixdf"); if (test_opt(sb, GRPID)) seq_puts(seq, ",grpid"); if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS)) seq_puts(seq, ",nogrpid"); if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT3_DEF_RESUID)) || le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) { seq_printf(seq, ",resuid=%u", from_kuid_munged(&init_user_ns, sbi->s_resuid)); } if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT3_DEF_RESGID)) || le16_to_cpu(es->s_def_resgid) != 
EXT3_DEF_RESGID) { seq_printf(seq, ",resgid=%u", from_kgid_munged(&init_user_ns, sbi->s_resgid)); } if (test_opt(sb, ERRORS_RO)) { int def_errors = le16_to_cpu(es->s_errors); if (def_errors == EXT3_ERRORS_PANIC || def_errors == EXT3_ERRORS_CONTINUE) { seq_puts(seq, ",errors=remount-ro"); } } if (test_opt(sb, ERRORS_CONT)) seq_puts(seq, ",errors=continue"); if (test_opt(sb, ERRORS_PANIC)) seq_puts(seq, ",errors=panic"); if (test_opt(sb, NO_UID32)) seq_puts(seq, ",nouid32"); if (test_opt(sb, DEBUG)) seq_puts(seq, ",debug"); #ifdef CONFIG_EXT3_FS_XATTR if (test_opt(sb, XATTR_USER)) seq_puts(seq, ",user_xattr"); if (!test_opt(sb, XATTR_USER) && (def_mount_opts & EXT3_DEFM_XATTR_USER)) { seq_puts(seq, ",nouser_xattr"); } #endif #ifdef CONFIG_EXT3_FS_POSIX_ACL if (test_opt(sb, POSIX_ACL)) seq_puts(seq, ",acl"); if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL)) seq_puts(seq, ",noacl"); #endif if (!test_opt(sb, RESERVATION)) seq_puts(seq, ",noreservation"); if (sbi->s_commit_interval) { seq_printf(seq, ",commit=%u", (unsigned) (sbi->s_commit_interval / HZ)); } /* * Always display barrier state so it's clear what the status is. */ seq_puts(seq, ",barrier="); seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0"); seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS))); if (test_opt(sb, DATA_ERR_ABORT)) seq_puts(seq, ",data_err=abort"); if (test_opt(sb, NOLOAD)) seq_puts(seq, ",norecovery"); ext3_show_quota_options(seq, sb); return 0; } static struct inode *ext3_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO) return ERR_PTR(-ESTALE); if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) return ERR_PTR(-ESTALE); /* iget isn't really right if the inode is currently unallocated!! * * ext3_read_inode will return a bad_inode if the inode had been * deleted, so we should be safe. 
* * Currently we don't know the generation for parent directory, so * a generation of 0 means "accept any" */ inode = ext3_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ext3_nfs_get_inode); } static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ext3_nfs_get_inode); } /* * Try to release metadata pages (indirect blocks, directories) which are * mapped via the block device. Since these pages could have journal heads * which would prevent try_to_free_buffers() from freeing them, we must use * jbd layer's try_to_free_buffers() function to release them. */ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_t wait) { journal_t *journal = EXT3_SB(sb)->s_journal; WARN_ON(PageChecked(page)); if (!page_has_buffers(page)) return 0; if (journal) return journal_try_to_free_buffers(journal, page, wait & ~__GFP_WAIT); return try_to_free_buffers(page); } #ifdef CONFIG_QUOTA #define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") #define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) static int ext3_write_dquot(struct dquot *dquot); static int ext3_acquire_dquot(struct dquot *dquot); static int ext3_release_dquot(struct dquot *dquot); static int ext3_mark_dquot_dirty(struct dquot *dquot); static int ext3_write_info(struct super_block *sb, int type); static int ext3_quota_on(struct super_block *sb, int type, int format_id, struct path *path); static int ext3_quota_on_mount(struct super_block *sb, int type); static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); static ssize_t ext3_quota_write(struct 
super_block *sb, int type, const char *data, size_t len, loff_t off); static const struct dquot_operations ext3_quota_operations = { .write_dquot = ext3_write_dquot, .acquire_dquot = ext3_acquire_dquot, .release_dquot = ext3_release_dquot, .mark_dirty = ext3_mark_dquot_dirty, .write_info = ext3_write_info, .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, }; static const struct quotactl_ops ext3_qctl_operations = { .quota_on = ext3_quota_on, .quota_off = dquot_quota_off, .quota_sync = dquot_quota_sync, .get_info = dquot_get_dqinfo, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .set_dqblk = dquot_set_dqblk }; #endif static const struct super_operations ext3_sops = { .alloc_inode = ext3_alloc_inode, .destroy_inode = ext3_destroy_inode, .write_inode = ext3_write_inode, .dirty_inode = ext3_dirty_inode, .drop_inode = ext3_drop_inode, .evict_inode = ext3_evict_inode, .put_super = ext3_put_super, .sync_fs = ext3_sync_fs, .freeze_fs = ext3_freeze, .unfreeze_fs = ext3_unfreeze, .statfs = ext3_statfs, .remount_fs = ext3_remount, .show_options = ext3_show_options, #ifdef CONFIG_QUOTA .quota_read = ext3_quota_read, .quota_write = ext3_quota_write, #endif .bdev_try_to_free_page = bdev_try_to_free_page, }; static const struct export_operations ext3_export_ops = { .fh_to_dentry = ext3_fh_to_dentry, .fh_to_parent = ext3_fh_to_parent, .get_parent = ext3_get_parent, }; enum { Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh, Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, 
Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize, Opt_usrquota, Opt_grpquota }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_nocheck, "nocheck"}, {Opt_nocheck, "check=none"}, {Opt_debug, "debug"}, {Opt_oldalloc, "oldalloc"}, {Opt_orlov, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_reservation, "reservation"}, {Opt_noreservation, "noreservation"}, {Opt_noload, "noload"}, {Opt_noload, "norecovery"}, {Opt_nobh, "nobh"}, {Opt_bh, "bh"}, {Opt_commit, "commit=%u"}, {Opt_journal_update, "journal=update"}, {Opt_journal_inum, "journal=%u"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_resize, "resize"}, {Opt_err, NULL}, }; static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) { ext3_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /*todo: use simple_strtoll with 
>32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext3_sb_info *sbi = EXT3_SB(sb); char *qname; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return 0; } qname = match_strdup(args); if (!qname) { ext3_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return 0; } if (sbi->s_qf_names[qtype]) { int same = !strcmp(sbi->s_qf_names[qtype], qname); kfree(qname); if (!same) { ext3_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); } return same; } if (strchr(qname, '/')) { ext3_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); kfree(qname); return 0; } sbi->s_qf_names[qtype] = qname; set_opt(sbi->s_mount_opt, QUOTA); return 1; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext3_sb_info *sbi = EXT3_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return 0; } if (sbi->s_qf_names[qtype]) { kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; } return 1; } #endif static int parse_options (char *options, struct super_block *sb, unsigned int *inum, unsigned long *journal_devnum, ext3_fsblk_t *n_blocks_count, int is_remount) { struct ext3_sb_info *sbi = EXT3_SB(sb); char * p; substring_t args[MAX_OPT_ARGS]; int data_opt = 0; int option; kuid_t uid; kgid_t gid; #ifdef CONFIG_QUOTA int qfmt; #endif if (!options) return 1; while ((p = strsep (&options, ",")) != NULL) { int token; if (!*p) continue; /* * Initialize args struct so we know whether arg was * found; some options take 
optional arguments. */ args[0].to = args[0].from = NULL; token = match_token(p, tokens, args); switch (token) { case Opt_bsd_df: clear_opt (sbi->s_mount_opt, MINIX_DF); break; case Opt_minix_df: set_opt (sbi->s_mount_opt, MINIX_DF); break; case Opt_grpid: set_opt (sbi->s_mount_opt, GRPID); break; case Opt_nogrpid: clear_opt (sbi->s_mount_opt, GRPID); break; case Opt_resuid: if (match_int(&args[0], &option)) return 0; uid = make_kuid(current_user_ns(), option); if (!uid_valid(uid)) { ext3_msg(sb, KERN_ERR, "Invalid uid value %d", option); return 0; } sbi->s_resuid = uid; break; case Opt_resgid: if (match_int(&args[0], &option)) return 0; gid = make_kgid(current_user_ns(), option); if (!gid_valid(gid)) { ext3_msg(sb, KERN_ERR, "Invalid gid value %d", option); return 0; } sbi->s_resgid = gid; break; case Opt_sb: /* handled by get_sb_block() instead of here */ /* *sb_block = match_int(&args[0]); */ break; case Opt_err_panic: clear_opt (sbi->s_mount_opt, ERRORS_CONT); clear_opt (sbi->s_mount_opt, ERRORS_RO); set_opt (sbi->s_mount_opt, ERRORS_PANIC); break; case Opt_err_ro: clear_opt (sbi->s_mount_opt, ERRORS_CONT); clear_opt (sbi->s_mount_opt, ERRORS_PANIC); set_opt (sbi->s_mount_opt, ERRORS_RO); break; case Opt_err_cont: clear_opt (sbi->s_mount_opt, ERRORS_RO); clear_opt (sbi->s_mount_opt, ERRORS_PANIC); set_opt (sbi->s_mount_opt, ERRORS_CONT); break; case Opt_nouid32: set_opt (sbi->s_mount_opt, NO_UID32); break; case Opt_nocheck: clear_opt (sbi->s_mount_opt, CHECK); break; case Opt_debug: set_opt (sbi->s_mount_opt, DEBUG); break; case Opt_oldalloc: ext3_msg(sb, KERN_WARNING, "Ignoring deprecated oldalloc option"); break; case Opt_orlov: ext3_msg(sb, KERN_WARNING, "Ignoring deprecated orlov option"); break; #ifdef CONFIG_EXT3_FS_XATTR case Opt_user_xattr: set_opt (sbi->s_mount_opt, XATTR_USER); break; case Opt_nouser_xattr: clear_opt (sbi->s_mount_opt, XATTR_USER); break; #else case Opt_user_xattr: case Opt_nouser_xattr: ext3_msg(sb, KERN_INFO, "(no)user_xattr options 
not supported"); break; #endif #ifdef CONFIG_EXT3_FS_POSIX_ACL case Opt_acl: set_opt(sbi->s_mount_opt, POSIX_ACL); break; case Opt_noacl: clear_opt(sbi->s_mount_opt, POSIX_ACL); break; #else case Opt_acl: case Opt_noacl: ext3_msg(sb, KERN_INFO, "(no)acl options not supported"); break; #endif case Opt_reservation: set_opt(sbi->s_mount_opt, RESERVATION); break; case Opt_noreservation: clear_opt(sbi->s_mount_opt, RESERVATION); break; case Opt_journal_update: /* @@@ FIXME */ /* Eventually we will want to be able to create a journal file here. For now, only allow the user to specify an existing inode to be the journal file. */ if (is_remount) { ext3_msg(sb, KERN_ERR, "error: cannot specify " "journal on remount"); return 0; } set_opt (sbi->s_mount_opt, UPDATE_JOURNAL); break; case Opt_journal_inum: if (is_remount) { ext3_msg(sb, KERN_ERR, "error: cannot specify " "journal on remount"); return 0; } if (match_int(&args[0], &option)) return 0; *inum = option; break; case Opt_journal_dev: if (is_remount) { ext3_msg(sb, KERN_ERR, "error: cannot specify " "journal on remount"); return 0; } if (match_int(&args[0], &option)) return 0; *journal_devnum = option; break; case Opt_noload: set_opt (sbi->s_mount_opt, NOLOAD); break; case Opt_commit: if (match_int(&args[0], &option)) return 0; if (option < 0) return 0; if (option == 0) option = JBD_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * option; break; case Opt_data_journal: data_opt = EXT3_MOUNT_JOURNAL_DATA; goto datacheck; case Opt_data_ordered: data_opt = EXT3_MOUNT_ORDERED_DATA; goto datacheck; case Opt_data_writeback: data_opt = EXT3_MOUNT_WRITEBACK_DATA; datacheck: if (is_remount) { if (test_opt(sb, DATA_FLAGS) == data_opt) break; ext3_msg(sb, KERN_ERR, "error: cannot change " "data mode on remount. 
The filesystem " "is mounted in data=%s mode and you " "try to remount it in data=%s mode.", data_mode_string(test_opt(sb, DATA_FLAGS)), data_mode_string(data_opt)); return 0; } else { clear_opt(sbi->s_mount_opt, DATA_FLAGS); sbi->s_mount_opt |= data_opt; } break; case Opt_data_err_abort: set_opt(sbi->s_mount_opt, DATA_ERR_ABORT); break; case Opt_data_err_ignore: clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT); break; #ifdef CONFIG_QUOTA case Opt_usrjquota: if (!set_qf_name(sb, USRQUOTA, &args[0])) return 0; break; case Opt_grpjquota: if (!set_qf_name(sb, GRPQUOTA, &args[0])) return 0; break; case Opt_offusrjquota: if (!clear_qf_name(sb, USRQUOTA)) return 0; break; case Opt_offgrpjquota: if (!clear_qf_name(sb, GRPQUOTA)) return 0; break; case Opt_jqfmt_vfsold: qfmt = QFMT_VFS_OLD; goto set_qf_format; case Opt_jqfmt_vfsv0: qfmt = QFMT_VFS_V0; goto set_qf_format; case Opt_jqfmt_vfsv1: qfmt = QFMT_VFS_V1; set_qf_format: if (sb_any_quota_loaded(sb) && sbi->s_jquota_fmt != qfmt) { ext3_msg(sb, KERN_ERR, "error: cannot change " "journaled quota options when " "quota turned on."); return 0; } sbi->s_jquota_fmt = qfmt; break; case Opt_quota: case Opt_usrquota: set_opt(sbi->s_mount_opt, QUOTA); set_opt(sbi->s_mount_opt, USRQUOTA); break; case Opt_grpquota: set_opt(sbi->s_mount_opt, QUOTA); set_opt(sbi->s_mount_opt, GRPQUOTA); break; case Opt_noquota: if (sb_any_quota_loaded(sb)) { ext3_msg(sb, KERN_ERR, "error: cannot change " "quota options when quota turned on."); return 0; } clear_opt(sbi->s_mount_opt, QUOTA); clear_opt(sbi->s_mount_opt, USRQUOTA); clear_opt(sbi->s_mount_opt, GRPQUOTA); break; #else case Opt_quota: case Opt_usrquota: case Opt_grpquota: ext3_msg(sb, KERN_ERR, "error: quota options not supported."); break; case Opt_usrjquota: case Opt_grpjquota: case Opt_offusrjquota: case Opt_offgrpjquota: case Opt_jqfmt_vfsold: case Opt_jqfmt_vfsv0: case Opt_jqfmt_vfsv1: ext3_msg(sb, KERN_ERR, "error: journaled quota options not " "supported."); break; case Opt_noquota: 
break; #endif case Opt_abort: set_opt(sbi->s_mount_opt, ABORT); break; case Opt_nobarrier: clear_opt(sbi->s_mount_opt, BARRIER); break; case Opt_barrier: if (args[0].from) { if (match_int(&args[0], &option)) return 0; } else option = 1; /* No argument, default to 1 */ if (option) set_opt(sbi->s_mount_opt, BARRIER); else clear_opt(sbi->s_mount_opt, BARRIER); break; case Opt_ignore: break; case Opt_resize: if (!is_remount) { ext3_msg(sb, KERN_ERR, "error: resize option only available " "for remount"); return 0; } if (match_int(&args[0], &option) != 0) return 0; *n_blocks_count = option; break; case Opt_nobh: ext3_msg(sb, KERN_WARNING, "warning: ignoring deprecated nobh option"); break; case Opt_bh: ext3_msg(sb, KERN_WARNING, "warning: ignoring deprecated bh option"); break; default: ext3_msg(sb, KERN_ERR, "error: unrecognized mount option \"%s\" " "or missing value", p); return 0; } } #ifdef CONFIG_QUOTA if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) clear_opt(sbi->s_mount_opt, USRQUOTA); if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) clear_opt(sbi->s_mount_opt, GRPQUOTA); if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { ext3_msg(sb, KERN_ERR, "error: old and new quota " "format mixing."); return 0; } if (!sbi->s_jquota_fmt) { ext3_msg(sb, KERN_ERR, "error: journaled quota format " "not specified."); return 0; } } else { if (sbi->s_jquota_fmt) { ext3_msg(sb, KERN_ERR, "error: journaled quota format " "specified with no journaling " "enabled."); return 0; } } #endif return 1; } static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, int read_only) { struct ext3_sb_info *sbi = EXT3_SB(sb); int res = 0; if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) { ext3_msg(sb, KERN_ERR, "error: revision level too high, " "forcing read-only mode"); res = MS_RDONLY; } if (read_only) return res; if (!(sbi->s_mount_state & EXT3_VALID_FS)) ext3_msg(sb, KERN_WARNING, 
"warning: mounting unchecked fs, " "running e2fsck is recommended"); else if ((sbi->s_mount_state & EXT3_ERROR_FS)) ext3_msg(sb, KERN_WARNING, "warning: mounting fs with errors, " "running e2fsck is recommended"); else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && le16_to_cpu(es->s_mnt_count) >= le16_to_cpu(es->s_max_mnt_count)) ext3_msg(sb, KERN_WARNING, "warning: maximal mount count reached, " "running e2fsck is recommended"); else if (le32_to_cpu(es->s_checkinterval) && (le32_to_cpu(es->s_lastcheck) + le32_to_cpu(es->s_checkinterval) <= get_seconds())) ext3_msg(sb, KERN_WARNING, "warning: checktime reached, " "running e2fsck is recommended"); #if 0 /* @@@ We _will_ want to clear the valid bit if we find inconsistencies, to force a fsck at reboot. But for a plain journaled filesystem we can keep it set as valid forever! :) */ es->s_state &= cpu_to_le16(~EXT3_VALID_FS); #endif if (!le16_to_cpu(es->s_max_mnt_count)) es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); es->s_mtime = cpu_to_le32(get_seconds()); ext3_update_dynamic_rev(sb); EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, es, 1); if (test_opt(sb, DEBUG)) ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, " "bpg=%lu, ipg=%lu, mo=%04lx]", sb->s_blocksize, sbi->s_groups_count, EXT3_BLOCKS_PER_GROUP(sb), EXT3_INODES_PER_GROUP(sb), sbi->s_mount_opt); if (EXT3_SB(sb)->s_journal->j_inode == NULL) { char b[BDEVNAME_SIZE]; ext3_msg(sb, KERN_INFO, "using external journal on %s", bdevname(EXT3_SB(sb)->s_journal->j_dev, b)); } else { ext3_msg(sb, KERN_INFO, "using internal journal"); } cleancache_init_fs(sb); return res; } /* Called at mount-time, super-block is locked */ static int ext3_check_descriptors(struct super_block *sb) { struct ext3_sb_info *sbi = EXT3_SB(sb); int i; ext3_debug ("Checking group descriptors"); for (i = 0; i < sbi->s_groups_count; i++) { struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL); ext3_fsblk_t 
first_block = ext3_group_first_block_no(sb, i); ext3_fsblk_t last_block; if (i == sbi->s_groups_count - 1) last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1; else last_block = first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1); if (le32_to_cpu(gdp->bg_block_bitmap) < first_block || le32_to_cpu(gdp->bg_block_bitmap) > last_block) { ext3_error (sb, "ext3_check_descriptors", "Block bitmap for group %d" " not in group (block %lu)!", i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap)); return 0; } if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block || le32_to_cpu(gdp->bg_inode_bitmap) > last_block) { ext3_error (sb, "ext3_check_descriptors", "Inode bitmap for group %d" " not in group (block %lu)!", i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap)); return 0; } if (le32_to_cpu(gdp->bg_inode_table) < first_block || le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 > last_block) { ext3_error (sb, "ext3_check_descriptors", "Inode table for group %d" " not in group (block %lu)!", i, (unsigned long) le32_to_cpu(gdp->bg_inode_table)); return 0; } } sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb)); sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb)); return 1; } /* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at * the superblock) which were deleted from all directories, but held open by * a process at the time of a crash. We walk the list and try to delete these * inodes at recovery time (only with a read-write filesystem). * * In order to keep the orphan inode chain consistent during traversal (in * case of crash during recovery), we link each inode into the superblock * orphan list_head and handle it the same way as an inode deletion during * normal operation (which journals the operations for us). * * We only do an iget() and an iput() on each inode, which is very safe if we * accidentally point at an in-use or already deleted inode. 
The worst that * can happen in this case is that we get a "bit already cleared" message from * ext3_free_inode(). The only reason we would point at a wrong inode is if * e2fsck was run on this filesystem, and it must have already done the orphan * inode cleanup for us, so we can safely abort without any further action. */ static void ext3_orphan_cleanup (struct super_block * sb, struct ext3_super_block * es) { unsigned int s_flags = sb->s_flags; int nr_orphans = 0, nr_truncates = 0; #ifdef CONFIG_QUOTA int i; #endif if (!es->s_last_orphan) { jbd_debug(4, "no orphan inodes to clean up\n"); return; } if (bdev_read_only(sb->s_bdev)) { ext3_msg(sb, KERN_ERR, "error: write access " "unavailable, skipping orphan cleanup."); return; } /* Check if feature set allows readwrite operations */ if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " "unknown ROCOMPAT features"); return; } if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { /* don't clear list on RO mount w/ errors */ if (es->s_last_orphan && !(s_flags & MS_RDONLY)) { jbd_debug(1, "Errors on filesystem, " "clearing orphan list.\n"); es->s_last_orphan = 0; } jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); return; } if (s_flags & MS_RDONLY) { ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs"); sb->s_flags &= ~MS_RDONLY; } #ifdef CONFIG_QUOTA /* Needed for iput() to work correctly and not trash data */ sb->s_flags |= MS_ACTIVE; /* Turn on quotas so that they are updated correctly */ for (i = 0; i < MAXQUOTAS; i++) { if (EXT3_SB(sb)->s_qf_names[i]) { int ret = ext3_quota_on_mount(sb, i); if (ret < 0) ext3_msg(sb, KERN_ERR, "error: cannot turn on journaled " "quota: %d", ret); } } #endif while (es->s_last_orphan) { struct inode *inode; inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; break; } list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan); 
dquot_initialize(inode); if (inode->i_nlink) { printk(KERN_DEBUG "%s: truncating inode %lu to %Ld bytes\n", __func__, inode->i_ino, inode->i_size); jbd_debug(2, "truncating inode %lu to %Ld bytes\n", inode->i_ino, inode->i_size); ext3_truncate(inode); nr_truncates++; } else { printk(KERN_DEBUG "%s: deleting unreferenced inode %lu\n", __func__, inode->i_ino); jbd_debug(2, "deleting unreferenced inode %lu\n", inode->i_ino); nr_orphans++; } iput(inode); /* The delete magic happens here! */ } #define PLURAL(x) (x), ((x)==1) ? "" : "s" if (nr_orphans) ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted", PLURAL(nr_orphans)); if (nr_truncates) ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up", PLURAL(nr_truncates)); #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ } /* * Maximal file size. There is a direct, and {,double-,triple-}indirect * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks. * We need to be 1 filesystem block less than the 2^32 sector limit. 
*/ static loff_t ext3_max_size(int bits) { loff_t res = EXT3_NDIR_BLOCKS; int meta_blocks; loff_t upper_limit; /* This is calculated to be the largest file size for a * dense, file such that the total number of * sectors in the file, including data and all indirect blocks, * does not exceed 2^32 -1 * __u32 i_blocks representing the total number of * 512 bytes blocks of the file */ upper_limit = (1LL << 32) - 1; /* total blocks in file system block size */ upper_limit >>= (bits - 9); /* indirect blocks */ meta_blocks = 1; /* double indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)); /* tripple indirect blocks */ meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2))); upper_limit -= meta_blocks; upper_limit <<= bits; res += 1LL << (bits-2); res += 1LL << (2*(bits-2)); res += 1LL << (3*(bits-2)); res <<= bits; if (res > upper_limit) res = upper_limit; if (res > MAX_LFS_FILESIZE) res = MAX_LFS_FILESIZE; return res; } static ext3_fsblk_t descriptor_loc(struct super_block *sb, ext3_fsblk_t logic_sb_block, int nr) { struct ext3_sb_info *sbi = EXT3_SB(sb); unsigned long bg, first_meta_bg; int has_super = 0; first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg); if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) || nr < first_meta_bg) return (logic_sb_block + nr + 1); bg = sbi->s_desc_per_block * nr; if (ext3_bg_has_super(sb, bg)) has_super = 1; return (has_super + ext3_group_first_block_no(sb, bg)); } static int ext3_fill_super (struct super_block *sb, void *data, int silent) { struct buffer_head * bh; struct ext3_super_block *es = NULL; struct ext3_sb_info *sbi; ext3_fsblk_t block; ext3_fsblk_t sb_block = get_sb_block(&data, sb); ext3_fsblk_t logic_sb_block; unsigned long offset = 0; unsigned int journal_inum = 0; unsigned long journal_devnum = 0; unsigned long def_mount_opts; struct inode *root; int blocksize; int hblock; int db_count; int i; int needs_recovery; int ret = -EINVAL; __le32 features; int err; sbi = kzalloc(sizeof(*sbi), 
GFP_KERNEL); if (!sbi) return -ENOMEM; sbi->s_blockgroup_lock = kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); if (!sbi->s_blockgroup_lock) { kfree(sbi); return -ENOMEM; } sb->s_fs_info = sbi; sbi->s_sb_block = sb_block; blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE); if (!blocksize) { ext3_msg(sb, KERN_ERR, "error: unable to set blocksize"); goto out_fail; } /* * The ext3 superblock will not be buffer aligned for other than 1kB * block sizes. We need to calculate the offset from buffer start. */ if (blocksize != EXT3_MIN_BLOCK_SIZE) { logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize; offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; } else { logic_sb_block = sb_block; } if (!(bh = sb_bread(sb, logic_sb_block))) { ext3_msg(sb, KERN_ERR, "error: unable to read superblock"); goto out_fail; } /* * Note: s_es must be initialized as soon as possible because * some ext3 macro-instructions depend on its value */ es = (struct ext3_super_block *) (bh->b_data + offset); sbi->s_es = es; sb->s_magic = le16_to_cpu(es->s_magic); if (sb->s_magic != EXT3_SUPER_MAGIC) goto cantfind_ext3; /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); if (def_mount_opts & EXT3_DEFM_DEBUG) set_opt(sbi->s_mount_opt, DEBUG); if (def_mount_opts & EXT3_DEFM_BSDGROUPS) set_opt(sbi->s_mount_opt, GRPID); if (def_mount_opts & EXT3_DEFM_UID16) set_opt(sbi->s_mount_opt, NO_UID32); #ifdef CONFIG_EXT3_FS_XATTR if (def_mount_opts & EXT3_DEFM_XATTR_USER) set_opt(sbi->s_mount_opt, XATTR_USER); #endif #ifdef CONFIG_EXT3_FS_POSIX_ACL if (def_mount_opts & EXT3_DEFM_ACL) set_opt(sbi->s_mount_opt, POSIX_ACL); #endif if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA) set_opt(sbi->s_mount_opt, JOURNAL_DATA); else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED) set_opt(sbi->s_mount_opt, ORDERED_DATA); else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK) set_opt(sbi->s_mount_opt, 
WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC) set_opt(sbi->s_mount_opt, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE) set_opt(sbi->s_mount_opt, ERRORS_CONT); else set_opt(sbi->s_mount_opt, ERRORS_RO); sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); /* enable barriers by default */ set_opt(sbi->s_mount_opt, BARRIER); set_opt(sbi->s_mount_opt, RESERVATION); if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum, NULL, 0)) goto failed_mount; sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0); if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV && (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) || EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) || EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U))) ext3_msg(sb, KERN_WARNING, "warning: feature flags set on rev 0 fs, " "running e2fsck is recommended"); /* * Check feature flags regardless of the revision level, since we * previously didn't change the revision level when setting the flags, * so there is a chance incompat flags are set on a rev 0 filesystem. 
*/ features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP); if (features) { ext3_msg(sb, KERN_ERR, "error: couldn't mount because of unsupported " "optional features (%x)", le32_to_cpu(features)); goto failed_mount; } features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP); if (!(sb->s_flags & MS_RDONLY) && features) { ext3_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of unsupported " "optional features (%x)", le32_to_cpu(features)); goto failed_mount; } blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT3_MIN_BLOCK_SIZE || blocksize > EXT3_MAX_BLOCK_SIZE) { ext3_msg(sb, KERN_ERR, "error: couldn't mount because of unsupported " "filesystem blocksize %d", blocksize); goto failed_mount; } hblock = bdev_logical_block_size(sb->s_bdev); if (sb->s_blocksize != blocksize) { /* * Make sure the blocksize for the filesystem is larger * than the hardware sectorsize for the machine. */ if (blocksize < hblock) { ext3_msg(sb, KERN_ERR, "error: fsblocksize %d too small for " "hardware sectorsize %d", blocksize, hblock); goto failed_mount; } brelse (bh); if (!sb_set_blocksize(sb, blocksize)) { ext3_msg(sb, KERN_ERR, "error: bad blocksize %d", blocksize); goto out_fail; } logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize; offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize; bh = sb_bread(sb, logic_sb_block); if (!bh) { ext3_msg(sb, KERN_ERR, "error: can't read superblock on 2nd try"); goto failed_mount; } es = (struct ext3_super_block *)(bh->b_data + offset); sbi->s_es = es; if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) { ext3_msg(sb, KERN_ERR, "error: magic mismatch"); goto failed_mount; } } sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits); if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) { sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE; sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO; } else { sbi->s_inode_size = le16_to_cpu(es->s_inode_size); sbi->s_first_ino = le32_to_cpu(es->s_first_ino); 
if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) || (!is_power_of_2(sbi->s_inode_size)) || (sbi->s_inode_size > blocksize)) { ext3_msg(sb, KERN_ERR, "error: unsupported inode size: %d", sbi->s_inode_size); goto failed_mount; } } sbi->s_frag_size = EXT3_MIN_FRAG_SIZE << le32_to_cpu(es->s_log_frag_size); if (blocksize != sbi->s_frag_size) { ext3_msg(sb, KERN_ERR, "error: fragsize %lu != blocksize %u (unsupported)", sbi->s_frag_size, blocksize); goto failed_mount; } sbi->s_frags_per_block = 1; sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0) goto cantfind_ext3; sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb); if (sbi->s_inodes_per_block == 0) goto cantfind_ext3; sbi->s_itb_per_group = sbi->s_inodes_per_group / sbi->s_inodes_per_block; sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc); sbi->s_sbh = bh; sbi->s_mount_state = le16_to_cpu(es->s_state); sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb)); sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb)); for (i=0; i < 4; i++) sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); sbi->s_def_hash_version = es->s_def_hash_version; i = le32_to_cpu(es->s_flags); if (i & EXT2_FLAGS_UNSIGNED_HASH) sbi->s_hash_unsigned = 3; else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) { #ifdef __CHAR_UNSIGNED__ es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH); sbi->s_hash_unsigned = 3; #else es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH); #endif } if (sbi->s_blocks_per_group > blocksize * 8) { ext3_msg(sb, KERN_ERR, "#blocks per group too big: %lu", sbi->s_blocks_per_group); goto failed_mount; } if (sbi->s_frags_per_group > blocksize * 8) { ext3_msg(sb, KERN_ERR, "error: #fragments per group too big: %lu", sbi->s_frags_per_group); goto failed_mount; } if (sbi->s_inodes_per_group > blocksize * 8) 
{ ext3_msg(sb, KERN_ERR, "error: #inodes per group too big: %lu", sbi->s_inodes_per_group); goto failed_mount; } err = generic_check_addressable(sb->s_blocksize_bits, le32_to_cpu(es->s_blocks_count)); if (err) { ext3_msg(sb, KERN_ERR, "error: filesystem is too large to mount safely"); if (sizeof(sector_t) < 8) ext3_msg(sb, KERN_ERR, "error: CONFIG_LBDAF not enabled"); ret = err; goto failed_mount; } if (EXT3_BLOCKS_PER_GROUP(sb) == 0) goto cantfind_ext3; sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - le32_to_cpu(es->s_first_data_block) - 1) / EXT3_BLOCKS_PER_GROUP(sb)) + 1; db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb)); sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *), GFP_KERNEL); if (sbi->s_group_desc == NULL) { ext3_msg(sb, KERN_ERR, "error: not enough memory"); ret = -ENOMEM; goto failed_mount; } bgl_lock_init(sbi->s_blockgroup_lock); for (i = 0; i < db_count; i++) { block = descriptor_loc(sb, logic_sb_block, i); sbi->s_group_desc[i] = sb_bread(sb, block); if (!sbi->s_group_desc[i]) { ext3_msg(sb, KERN_ERR, "error: can't read group descriptor %d", i); db_count = i; goto failed_mount2; } } if (!ext3_check_descriptors (sb)) { ext3_msg(sb, KERN_ERR, "error: group descriptors corrupted"); goto failed_mount2; } sbi->s_gdb_count = db_count; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); /* per fileystem reservation list head & lock */ spin_lock_init(&sbi->s_rsv_window_lock); sbi->s_rsv_window_root = RB_ROOT; /* Add a single, static dummy reservation to the start of the * reservation window list --- it gives us a placeholder for * append-at-start-of-list which makes the allocation logic * _much_ simpler. 
*/ sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED; sbi->s_rsv_window_head.rsv_alloc_hit = 0; sbi->s_rsv_window_head.rsv_goal_size = 0; ext3_rsv_window_add(sb, &sbi->s_rsv_window_head); /* * set up enough so that it can read an inode */ sb->s_op = &ext3_sops; sb->s_export_op = &ext3_export_ops; sb->s_xattr = ext3_xattr_handlers; #ifdef CONFIG_QUOTA sb->s_qcop = &ext3_qctl_operations; sb->dq_op = &ext3_quota_operations; #endif memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); mutex_init(&sbi->s_resize_lock); sb->s_root = NULL; needs_recovery = (es->s_last_orphan != 0 || EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)); /* * The first inode we look at is the journal inode. Don't try * root first: it may be modified in the journal! */ if (!test_opt(sb, NOLOAD) && EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) { if (ext3_load_journal(sb, es, journal_devnum)) goto failed_mount2; } else if (journal_inum) { if (ext3_create_journal(sb, es, journal_inum)) goto failed_mount2; } else { if (!silent) ext3_msg(sb, KERN_ERR, "error: no journal found. " "mounting ext3 over ext2?"); goto failed_mount2; } err = percpu_counter_init(&sbi->s_freeblocks_counter, ext3_count_free_blocks(sb)); if (!err) { err = percpu_counter_init(&sbi->s_freeinodes_counter, ext3_count_free_inodes(sb)); } if (!err) { err = percpu_counter_init(&sbi->s_dirs_counter, ext3_count_dirs(sb)); } if (err) { ext3_msg(sb, KERN_ERR, "error: insufficient memory"); ret = err; goto failed_mount3; } /* We have now updated the journal if required, so we can * validate the data journaling mode. 
*/ switch (test_opt(sb, DATA_FLAGS)) { case 0: /* No mode set, assume a default based on the journal capabilities: ORDERED_DATA if the journal can cope, else JOURNAL_DATA */ if (journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE); else set_opt(sbi->s_mount_opt, JOURNAL_DATA); break; case EXT3_MOUNT_ORDERED_DATA: case EXT3_MOUNT_WRITEBACK_DATA: if (!journal_check_available_features (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) { ext3_msg(sb, KERN_ERR, "error: journal does not support " "requested data journaling mode"); goto failed_mount3; } default: break; } /* * The journal_load will have done any necessary log recovery, * so we can safely mount the rest of the filesystem now. */ root = ext3_iget(sb, EXT3_ROOT_INO); if (IS_ERR(root)) { ext3_msg(sb, KERN_ERR, "error: get root inode failed"); ret = PTR_ERR(root); goto failed_mount3; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { iput(root); ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck"); goto failed_mount3; } sb->s_root = d_make_root(root); if (!sb->s_root) { ext3_msg(sb, KERN_ERR, "error: get root dentry failed"); ret = -ENOMEM; goto failed_mount3; } if (ext3_setup_super(sb, es, sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; ext3_orphan_cleanup(sb, es); EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; if (needs_recovery) { ext3_mark_recovery_complete(sb, es); ext3_msg(sb, KERN_INFO, "recovery complete"); } ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode", test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal": test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? 
"ordered": "writeback");
	return 0;

cantfind_ext3:
	if (!silent)
		ext3_msg(sb, KERN_INFO,
			"error: can't find ext3 filesystem on dev %s.",
		       sb->s_id);
	goto failed_mount;

	/* Error unwind: tear down in the reverse order of setup above. */
failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	journal_destroy(sbi->s_journal);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kfree(sbi->s_group_desc);
failed_mount:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext3_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
	return ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 *
 * Propagates the BARRIER and DATA_ERR_ABORT mount options into the
 * journal's flags, and the commit interval if one was configured.
 */
static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (sbi->s_commit_interval)
		journal->j_commit_interval = sbi->s_commit_interval;
	/* We could also set up an ext3-specific default for the commit
	 * interval here, but for now we'll just fall back to the jbd
	 * default. */

	/* j_flags is shared with the journalling layer; update it under
	 * the journal's state lock. */
	spin_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JFS_BARRIER;
	else
		journal->j_flags &= ~JFS_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR;
	spin_unlock(&journal->j_state_lock);
}

/*
 * Open the in-filesystem journal inode (journal_inum) and wrap it in a
 * jbd journal_t.  Returns NULL on any failure.
 */
static journal_t *ext3_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	/* First, test for the existence of a valid inode on disk.  Bad
	 * things happen if we iget() an unused inode, as the subsequent
	 * iput() will try to delete it.
*/ journal_inode = ext3_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext3_msg(sb, KERN_ERR, "error: no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext3_msg(sb, KERN_ERR, "error: journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext3_msg(sb, KERN_ERR, "error: invalid journal inode"); iput(journal_inode); return NULL; } journal = journal_init_inode(journal_inode); if (!journal) { ext3_msg(sb, KERN_ERR, "error: could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext3_init_journal_params(sb, journal); return journal; } static journal_t *ext3_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head * bh; journal_t *journal; ext3_fsblk_t start; ext3_fsblk_t len; int hblock, blocksize; ext3_fsblk_t sb_block; unsigned long offset; struct ext3_super_block * es; struct block_device *bdev; bdev = ext3_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext3_msg(sb, KERN_ERR, "error: blocksize too small for journal device"); goto out_bdev; } sb_block = EXT3_MIN_BLOCK_SIZE / blocksize; offset = EXT3_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext3_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext3_msg(sb, KERN_ERR, "error: external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext3_msg(sb, KERN_ERR, "error: journal UUID does not match"); brelse(bh); 
goto out_bdev; } len = le32_to_cpu(es->s_blocks_count); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext3_msg(sb, KERN_ERR, "error: failed to create device journal"); goto out_bdev; } journal->j_private = sb; if (!bh_uptodate_or_lock(journal->j_sb_buffer)) { if (bh_submit_read(journal->j_sb_buffer)) { ext3_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext3_msg(sb, KERN_ERR, "error: external journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT3_SB(sb)->journal_bdev = bdev; ext3_init_journal_params(sb, journal); return journal; out_journal: journal_destroy(journal); out_bdev: ext3_blkdev_put(bdev); return NULL; } static int ext3_load_journal(struct super_block *sb, struct ext3_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext3_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. 
*/ if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { ext3_msg(sb, KERN_INFO, "recovery required on readonly filesystem"); if (really_read_only) { ext3_msg(sb, KERN_ERR, "error: write access " "unavailable, cannot proceed"); return -EROFS; } ext3_msg(sb, KERN_INFO, "write access will be enabled during recovery"); } } if (journal_inum && journal_dev) { ext3_msg(sb, KERN_ERR, "error: filesystem has both journal " "and inode journals"); return -EINVAL; } if (journal_inum) { if (!(journal = ext3_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext3_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JFS_BARRIER)) printk(KERN_INFO "EXT3-fs: barriers not enabled\n"); if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { err = journal_update_format(journal); if (err) { ext3_msg(sb, KERN_ERR, "error updating journal"); journal_destroy(journal); return err; } } if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) err = journal_wipe(journal, !really_read_only); if (!err) err = journal_load(journal); if (err) { ext3_msg(sb, KERN_ERR, "error loading journal"); journal_destroy(journal); return err; } EXT3_SB(sb)->s_journal = journal; ext3_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. 
 */
		ext3_commit_super(sb, es, 1);
	}

	return 0;
}

/*
 * Create a brand-new journal on the given inode and flag the filesystem
 * as journaled: sets the HAS_JOURNAL compat and RECOVER incompat feature
 * bits and records the journal inode number in the superblock.
 * Refuses to run on a read-only mount.  Returns 0 or a -ve errno.
 */
static int ext3_create_journal(struct super_block *sb,
			       struct ext3_super_block *es,
			       unsigned int journal_inum)
{
	journal_t *journal;
	int err;

	if (sb->s_flags & MS_RDONLY) {
		ext3_msg(sb, KERN_ERR,
			"error: readonly filesystem when trying to "
			"create journal");
		return -EROFS;
	}

	journal = ext3_get_journal(sb, journal_inum);
	if (!journal)
		return -EINVAL;

	ext3_msg(sb, KERN_INFO, "creating new journal on inode %u",
	       journal_inum);

	err = journal_create(journal);
	if (err) {
		ext3_msg(sb, KERN_ERR, "error creating journal");
		journal_destroy(journal);
		return -EIO;
	}

	EXT3_SB(sb)->s_journal = journal;

	ext3_update_dynamic_rev(sb);
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
	EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL);

	es->s_journal_inum = cpu_to_le32(journal_inum);

	/* Make sure we flush the recovery flag to disk. */
	ext3_commit_super(sb, es, 1);

	return 0;
}

/*
 * Write the in-memory superblock back to its buffer_head, refreshing the
 * free block/inode counts and (on r/w mounts) the write timestamp.
 * If @sync, wait for the write to complete and report any I/O error.
 */
static int ext3_commit_super(struct super_block *sb,
			       struct ext3_super_block *es,
			       int sync)
{
	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh)
		return error;

	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear.  A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext3_msg(sb, KERN_ERR, "previous I/O error to "
			"superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.
This avoids updating the superblock
	 * write time when we are mounting the root file system
	 * read/only but we need to replay the journal; at that point,
	 * for people who are east of GMT and who make their clock
	 * tick in localtime for Windows bug-for-bug compatibility,
	 * the clock is set in the future, and this will cause e2fsck
	 * to complain and force a full file system check.
	 */
	if (!(sb->s_flags & MS_RDONLY))
		es->s_wtime = cpu_to_le32(get_seconds());
	/* Refresh the on-disk free counts from the live counters. */
	es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
	es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
	BUFFER_TRACE(sbh, "marking dirty");
	mark_buffer_dirty(sbh);
	if (sync) {
		error = sync_dirty_buffer(sbh);
		if (buffer_write_io_error(sbh)) {
			ext3_msg(sb, KERN_ERR, "I/O error while writing "
				"superblock");
			clear_buffer_write_io_error(sbh);
			set_buffer_uptodate(sbh);
		}
	}
	return error;
}

/*
 * Have we just finished recovery?  If so, and if we are mounting (or
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk.  Record that fact by flushing the journal and
 * clearing the RECOVER incompat feature bit in the superblock.
 */
static void ext3_mark_recovery_complete(struct super_block * sb,
					struct ext3_super_block * es)
{
	journal_t *journal = EXT3_SB(sb)->s_journal;

	journal_lock_updates(journal);
	/* If the flush fails, keep the RECOVER flag set so replay will
	 * happen again on the next mount. */
	if (journal_flush(journal) < 0)
		goto out;

	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
	    sb->s_flags & MS_RDONLY) {
		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
		ext3_commit_super(sb, es, 1);
	}

out:
	journal_unlock_updates(journal);
}

/*
 * If we are mounting (or read-write remounting) a filesystem whose journal
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext3_clear_journal_err(struct super_block *sb,
				   struct ext3_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	journal = EXT3_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext3_error() or ext3_abort()
	 */
	j_errno = journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext3_decode_error(sb, j_errno, nbuf);
		ext3_warning(sb, __func__, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext3_warning(sb, __func__, "Marking fs in need of "
			     "filesystem check.");

		/* Transfer the error into the superblock so e2fsck will
		 * notice it, then clear it from the journal. */
		EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
		ext3_commit_super (sb, es, 1);

		journal_clear_err(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.  No-op on a read-only mount.
 */
int ext3_force_commit(struct super_block *sb)
{
	journal_t *journal;
	int ret;

	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT3_SB(sb)->s_journal;
	ret = ext3_journal_force_commit(journal);
	return ret;
}

/*
 * Sync the filesystem by kicking a journal commit; if @wait, block until
 * that commit has been written out.
 */
static int ext3_sync_fs(struct super_block *sb, int wait)
{
	tid_t target;

	trace_ext3_sync_fs(sb, wait);
	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);
	if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
		if (wait)
			log_wait_commit(EXT3_SB(sb)->s_journal, target);
	}
	return 0;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 */
static int ext3_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (!(sb->s_flags & MS_RDONLY)) {
		journal = EXT3_SB(sb)->s_journal;

		/* Now we set up the journal barrier. */
		journal_lock_updates(journal);

		/*
		 * We don't want to clear needs_recovery flag when we failed
		 * to flush the journal.
		 */
		error = journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag.
*/ EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1); if (error) goto out; } return 0; out: journal_unlock_updates(journal); return error; } /* * Called by LVM after the snapshot is done. We need to reset the RECOVER * flag here, even though the filesystem is not technically dirty yet. */ static int ext3_unfreeze(struct super_block *sb) { if (!(sb->s_flags & MS_RDONLY)) { /* Reser the needs_recovery flag before the fs is unlocked. */ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1); journal_unlock_updates(EXT3_SB(sb)->s_journal); } return 0; } static int ext3_remount (struct super_block * sb, int * flags, char * data) { struct ext3_super_block * es; struct ext3_sb_info *sbi = EXT3_SB(sb); ext3_fsblk_t n_blocks_count = 0; unsigned long old_sb_flags; struct ext3_mount_options old_opts; int enable_quota = 0; int err; #ifdef CONFIG_QUOTA int i; #endif /* Store the original options */ old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) if (sbi->s_qf_names[i]) { old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i], GFP_KERNEL); if (!old_opts.s_qf_names[i]) { int j; for (j = 0; j < i; j++) kfree(old_opts.s_qf_names[j]); return -ENOMEM; } } else old_opts.s_qf_names[i] = NULL; #endif /* * Allow the "check" option to be passed as a remount option. */ if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) { err = -EINVAL; goto restore_opts; } if (test_opt(sb, ABORT)) ext3_abort(sb, __func__, "Abort forced by user"); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); es = sbi->s_es; ext3_init_journal_params(sb, sbi->s_journal); if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || n_blocks_count > le32_to_cpu(es->s_blocks_count)) { if (test_opt(sb, ABORT)) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) && (sbi->s_mount_state & EXT3_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); ext3_mark_recovery_complete(sb, es); } else { __le32 ret; if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))) { ext3_msg(sb, KERN_WARNING, "warning: couldn't remount RDWR " "because of unsupported optional " "features (%x)", le32_to_cpu(ret)); err = -EROFS; goto restore_opts; } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount & mount for now. */ if (es->s_last_orphan) { ext3_msg(sb, KERN_WARNING, "warning: couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount & mount instead."); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) 
*/ ext3_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if ((err = ext3_group_extend(sb, es, n_blocks_count))) goto restore_opts; if (!ext3_setup_super (sb, es, 0)) sb->s_flags &= ~MS_RDONLY; enable_quota = 1; } } #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) kfree(old_opts.s_qf_names[i]); #endif if (enable_quota) dquot_resume(sb, -1); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif return err; } static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) { struct super_block *sb = dentry->d_sb; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; u64 fsid; if (test_opt(sb, MINIX_DF)) { sbi->s_overhead_last = 0; } else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) { unsigned long ngroups = sbi->s_groups_count, i; ext3_fsblk_t overhead = 0; smp_rmb(); /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. */ /* * All of the blocks before first_data_block are * overhead */ overhead = le32_to_cpu(es->s_first_data_block); /* * Add the overhead attributed to the superblock and * block group descriptors. If the sparse superblocks * feature is turned on, then not all groups have this. */ for (i = 0; i < ngroups; i++) { overhead += ext3_bg_has_super(sb, i) + ext3_bg_num_gdb(sb, i); cond_resched(); } /* * Every block group has an inode bitmap, a block * bitmap, and an inode table. 
*/ overhead += ngroups * (2 + sbi->s_itb_per_group); sbi->s_overhead_last = overhead; smp_wmb(); sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count); } buf->f_type = EXT3_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last; buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter); buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count); if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count)) buf->f_bavail = 0; buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT3_NAME_LEN; fsid = le64_to_cpup((void *)es->s_uuid) ^ le64_to_cpup((void *)es->s_uuid + sizeof(u64)); buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } /* Helper function for writing quotas on sync - we need to start transaction before quota file * is locked for write. Otherwise the are possible deadlocks: * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() * dquot_initialize() down(dqio_mutex) * down(dqio_mutex) journal_start() * */ #ifdef CONFIG_QUOTA static inline struct inode *dquot_to_inode(struct dquot *dquot) { return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; } static int ext3_write_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; struct inode *inode; inode = dquot_to_inode(dquot); handle = ext3_journal_start(inode, EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_acquire_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext3_journal_start(dquot_to_inode(dquot), EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_acquire(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int 
ext3_release_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext3_journal_start(dquot_to_inode(dquot), EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); return PTR_ERR(handle); } ret = dquot_release(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_mark_dquot_dirty(struct dquot *dquot) { /* Are we journaling quotas? */ if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] || EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) { dquot_mark_dquot_dirty(dquot); return ext3_write_dquot(dquot); } else { return dquot_mark_dquot_dirty(dquot); } } static int ext3_write_info(struct super_block *sb, int type) { int ret, err; handle_t *handle; /* Data block + inode block */ handle = ext3_journal_start(sb->s_root->d_inode, 2); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit_info(sb, type); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } /* * Turn on quotas during mount time - we need to find * the quota file and such... */ static int ext3_quota_on_mount(struct super_block *sb, int type) { return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type], EXT3_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int ext3_quota_on(struct super_block *sb, int type, int format_id, struct path *path) { int err; if (!test_opt(sb, QUOTA)) return -EINVAL; /* Quotafile not on the same filesystem? */ if (path->dentry->d_sb != sb) return -EXDEV; /* Journaling quota? */ if (EXT3_SB(sb)->s_qf_names[type]) { /* Quotafile not of fs root? */ if (path->dentry->d_parent != sb->s_root) ext3_msg(sb, KERN_WARNING, "warning: Quota file not on filesystem root. " "Journaled quota will not work."); } /* * When we journal data on quota file, we have to flush journal to see * all updates to the file when we bypass pagecache... 
*/ if (ext3_should_journal_data(path->dentry->d_inode)) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... */ journal_lock_updates(EXT3_SB(sb)->s_journal); err = journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext3_bread(NULL, inode, blk, 0, &err); if (err) return err; if (!bh) /* A hole? 
*/ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL; struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (!handle) { ext3_msg(sb, KERN_WARNING, "warning: quota write (off=%llu, len=%llu)" " cancelled because transaction is not started.", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. */ if (sb->s_blocksize - offset < len) { ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } bh = ext3_bread(handle, inode, blk, 1, &err); if (!bh) goto out; if (journal_quota) { err = ext3_journal_get_write_access(handle, bh); if (err) { brelse(bh); goto out; } } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); if (journal_quota) err = ext3_journal_dirty_metadata(handle, bh); else { /* Always do at least ordered writes for quotas */ err = ext3_journal_dirty_data(handle, bh); mark_buffer_dirty(bh); } brelse(bh); out: if (err) return err; if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT3_I(inode)->i_disksize = inode->i_size; } inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext3_mark_inode_dirty(handle, inode); return len; } #endif static struct dentry *ext3_mount(struct file_system_type *fs_type, int flags, const 
char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super); } static struct file_system_type ext3_fs_type = { .owner = THIS_MODULE, .name = "ext3", .mount = ext3_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("ext3"); static int __init init_ext3_fs(void) { int err = init_ext3_xattr(); if (err) return err; err = init_inodecache(); if (err) goto out1; err = register_filesystem(&ext3_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: exit_ext3_xattr(); return err; } static void __exit exit_ext3_fs(void) { unregister_filesystem(&ext3_fs_type); destroy_inodecache(); exit_ext3_xattr(); } MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions"); MODULE_LICENSE("GPL"); module_init(init_ext3_fs) module_exit(exit_ext3_fs)
gpl-2.0
alivanov79/spkernel
arch/arm/mach-msm/qdsp6v2/audio_amrnb.c
2216
4316
/* amrnb audio output device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "audio_utils_aio.h" #ifdef CONFIG_DEBUG_FS static const struct file_operations audio_amrnb_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, }; #endif static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct q6audio_aio *audio = file->private_data; int rc = 0; switch (cmd) { case AUDIO_START: { pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ rc = q6asm_enc_cfg_blk_pcm(audio->ac, audio->pcm_cfg.sample_rate, audio->pcm_cfg.channel_count); if (rc < 0) { pr_err("pcm output block config failed\n"); break; } } rc = audio_aio_enable(audio); audio->eos_rsp = 0; audio->eos_flag = 0; if (!rc) { audio->enabled = 1; } else { audio->enabled = 0; pr_err("Audio Start procedure failed rc=%d\n", rc); break; } pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); if (audio->stopped == 1) audio->stopped = 0; break; } default: pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; } static int audio_open(struct inode *inode, struct file *file) { struct q6audio_aio *audio = NULL; int rc = 0; #ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_amrnb_" + 5]; #endif audio 
= kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); if (audio == NULL) { pr_err("Could not allocate memory for wma decode driver\n"); return -ENOMEM; } audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, (void *)audio); if (!audio->ac) { pr_err("Could not allocate memory for audio client\n"); kfree(audio); return -ENOMEM; } /* open in T/NT mode */ if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, FORMAT_AMRNB); if (rc < 0) { pr_err("NT mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = NON_TUNNEL_MODE; audio->buf_cfg.frames_per_buf = 0x01; audio->buf_cfg.meta_info_enable = 0x01; } else if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { rc = q6asm_open_write(audio->ac, FORMAT_AMRNB); if (rc < 0) { pr_err("T mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = TUNNEL_MODE; audio->buf_cfg.meta_info_enable = 0x00; } else { pr_err("Not supported mode\n"); rc = -EACCES; goto fail; } rc = audio_aio_open(audio, file); if (rc < 0) { pr_err("audio_aio_open rc=%d\n", rc); goto fail; } #ifdef CONFIG_DEBUG_FS snprintf(name, sizeof name, "msm_amrnb_%04x", audio->ac->session); audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, (void *)audio, &audio_amrnb_debug_fops); if (IS_ERR(audio->dentry)) pr_debug("debugfs_create_file failed\n"); #endif pr_info("%s:amrnb decoder open success, session_id = %d\n", __func__, audio->ac->session); return rc; fail: q6asm_audio_client_free(audio->ac); kfree(audio); return rc; } static const struct file_operations audio_amrnb_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_aio_release, .unlocked_ioctl = audio_ioctl, .fsync = audio_aio_fsync, }; struct miscdevice audio_amrnb_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_amrnb", .fops = &audio_amrnb_fops, }; static int __init audio_amrnb_init(void) { return 
misc_register(&audio_amrnb_misc); } device_initcall(audio_amrnb_init);
gpl-2.0
lgics/lge-kernel-msm7x27-3.4
drivers/rtc/rtc-twl.c
3240
15770
/* * rtc-twl.c -- TWL Real Time Clock interface * * Copyright (C) 2007 MontaVista Software, Inc * Author: Alexandre Rusev <source@mvista.com> * * Based on original TI driver twl4030-rtc.c * Copyright (C) 2006 Texas Instruments, Inc. * * Based on rtc-omap.c * Copyright (C) 2003 MontaVista Software, Inc. * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com> * Copyright (C) 2006 David Brownell * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/i2c/twl.h> /* * RTC block register offsets (use TWL_MODULE_RTC) */ enum { REG_SECONDS_REG = 0, REG_MINUTES_REG, REG_HOURS_REG, REG_DAYS_REG, REG_MONTHS_REG, REG_YEARS_REG, REG_WEEKS_REG, REG_ALARM_SECONDS_REG, REG_ALARM_MINUTES_REG, REG_ALARM_HOURS_REG, REG_ALARM_DAYS_REG, REG_ALARM_MONTHS_REG, REG_ALARM_YEARS_REG, REG_RTC_CTRL_REG, REG_RTC_STATUS_REG, REG_RTC_INTERRUPTS_REG, REG_RTC_COMP_LSB_REG, REG_RTC_COMP_MSB_REG, }; static const u8 twl4030_rtc_reg_map[] = { [REG_SECONDS_REG] = 0x00, [REG_MINUTES_REG] = 0x01, [REG_HOURS_REG] = 0x02, [REG_DAYS_REG] = 0x03, [REG_MONTHS_REG] = 0x04, [REG_YEARS_REG] = 0x05, [REG_WEEKS_REG] = 0x06, [REG_ALARM_SECONDS_REG] = 0x07, [REG_ALARM_MINUTES_REG] = 0x08, [REG_ALARM_HOURS_REG] = 0x09, [REG_ALARM_DAYS_REG] = 0x0A, [REG_ALARM_MONTHS_REG] = 0x0B, [REG_ALARM_YEARS_REG] = 0x0C, [REG_RTC_CTRL_REG] = 0x0D, [REG_RTC_STATUS_REG] = 0x0E, [REG_RTC_INTERRUPTS_REG] = 0x0F, [REG_RTC_COMP_LSB_REG] = 0x10, [REG_RTC_COMP_MSB_REG] = 0x11, }; static const u8 twl6030_rtc_reg_map[] = { [REG_SECONDS_REG] = 0x00, [REG_MINUTES_REG] = 0x01, 
[REG_HOURS_REG] = 0x02, [REG_DAYS_REG] = 0x03, [REG_MONTHS_REG] = 0x04, [REG_YEARS_REG] = 0x05, [REG_WEEKS_REG] = 0x06, [REG_ALARM_SECONDS_REG] = 0x08, [REG_ALARM_MINUTES_REG] = 0x09, [REG_ALARM_HOURS_REG] = 0x0A, [REG_ALARM_DAYS_REG] = 0x0B, [REG_ALARM_MONTHS_REG] = 0x0C, [REG_ALARM_YEARS_REG] = 0x0D, [REG_RTC_CTRL_REG] = 0x10, [REG_RTC_STATUS_REG] = 0x11, [REG_RTC_INTERRUPTS_REG] = 0x12, [REG_RTC_COMP_LSB_REG] = 0x13, [REG_RTC_COMP_MSB_REG] = 0x14, }; /* RTC_CTRL_REG bitfields */ #define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01 #define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02 #define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04 #define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08 #define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10 #define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20 #define BIT_RTC_CTRL_REG_GET_TIME_M 0x40 #define BIT_RTC_CTRL_REG_RTC_V_OPT 0x80 /* RTC_STATUS_REG bitfields */ #define BIT_RTC_STATUS_REG_RUN_M 0x02 #define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04 #define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08 #define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10 #define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20 #define BIT_RTC_STATUS_REG_ALARM_M 0x40 #define BIT_RTC_STATUS_REG_POWER_UP_M 0x80 /* RTC_INTERRUPTS_REG bitfields */ #define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03 #define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04 #define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08 /* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */ #define ALL_TIME_REGS 6 /*----------------------------------------------------------------------*/ static u8 *rtc_reg_map; /* * Supports 1 byte read from TWL RTC register. */ static int twl_rtc_read_u8(u8 *data, u8 reg) { int ret; ret = twl_i2c_read_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg])); if (ret < 0) pr_err("twl_rtc: Could not read TWL" "register %X - error %d\n", reg, ret); return ret; } /* * Supports 1 byte write to TWL RTC registers. 
*/ static int twl_rtc_write_u8(u8 data, u8 reg) { int ret; ret = twl_i2c_write_u8(TWL_MODULE_RTC, data, (rtc_reg_map[reg])); if (ret < 0) pr_err("twl_rtc: Could not write TWL" "register %X - error %d\n", reg, ret); return ret; } /* * Cache the value for timer/alarm interrupts register; this is * only changed by callers holding rtc ops lock (or resume). */ static unsigned char rtc_irq_bits; /* * Enable 1/second update and/or alarm interrupts. */ static int set_rtc_irq_bit(unsigned char bit) { unsigned char val; int ret; /* if the bit is set, return from here */ if (rtc_irq_bits & bit) return 0; val = rtc_irq_bits | bit; val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M; ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); if (ret == 0) rtc_irq_bits = val; return ret; } /* * Disable update and/or alarm interrupts. */ static int mask_rtc_irq_bit(unsigned char bit) { unsigned char val; int ret; /* if the bit is clear, return from here */ if (!(rtc_irq_bits & bit)) return 0; val = rtc_irq_bits & ~bit; ret = twl_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); if (ret == 0) rtc_irq_bits = val; return ret; } static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled) { int ret; if (enabled) ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); else ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); return ret; } /* * Gets current TWL RTC time and date parameters. 
* * The RTC's time/alarm representation is not what gmtime(3) requires * Linux to use: * * - Months are 1..12 vs Linux 0-11 * - Years are 0..99 vs Linux 1900..N (we assume 21st century) */ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm) { unsigned char rtc_data[ALL_TIME_REGS + 1]; int ret; u8 save_control; u8 rtc_control; ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret); return ret; } /* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */ if (twl_class_is_6030()) { if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) { save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M; ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s clr GET_TIME, error %d\n", __func__, ret); return ret; } } } /* Copy RTC counting registers to static registers or latches */ rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M; /* for twl6030/32 enable read access to static shadowed registers */ if (twl_class_is_6030()) rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT; ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret); return ret; } ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data, (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "%s: reading data, error %d\n", __func__, ret); return ret; } /* for twl6030 restore original state of rtc control register */ if (twl_class_is_6030()) { ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); if (ret < 0) { dev_err(dev, "%s: restore CTRL_REG, error %d\n", __func__, ret); return ret; } } tm->tm_sec = bcd2bin(rtc_data[0]); tm->tm_min = bcd2bin(rtc_data[1]); tm->tm_hour = bcd2bin(rtc_data[2]); tm->tm_mday = bcd2bin(rtc_data[3]); tm->tm_mon = bcd2bin(rtc_data[4]) - 1; tm->tm_year = bcd2bin(rtc_data[5]) + 100; return ret; } static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm) { unsigned char 
save_control; unsigned char rtc_data[ALL_TIME_REGS + 1]; int ret; rtc_data[1] = bin2bcd(tm->tm_sec); rtc_data[2] = bin2bcd(tm->tm_min); rtc_data[3] = bin2bcd(tm->tm_hour); rtc_data[4] = bin2bcd(tm->tm_mday); rtc_data[5] = bin2bcd(tm->tm_mon + 1); rtc_data[6] = bin2bcd(tm->tm_year - 100); /* Stop RTC while updating the TC registers */ ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG); if (ret < 0) goto out; save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M; ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); if (ret < 0) goto out; /* update all the time registers in one shot */ ret = twl_i2c_write(TWL_MODULE_RTC, rtc_data, (rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "rtc_set_time error %d\n", ret); goto out; } /* Start back RTC */ save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M; ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG); out: return ret; } /* * Gets current TWL RTC alarm time. */ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) { unsigned char rtc_data[ALL_TIME_REGS + 1]; int ret; ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data, (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS); if (ret < 0) { dev_err(dev, "rtc_read_alarm error %d\n", ret); return ret; } /* some of these fields may be wildcard/"match all" */ alm->time.tm_sec = bcd2bin(rtc_data[0]); alm->time.tm_min = bcd2bin(rtc_data[1]); alm->time.tm_hour = bcd2bin(rtc_data[2]); alm->time.tm_mday = bcd2bin(rtc_data[3]); alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1; alm->time.tm_year = bcd2bin(rtc_data[5]) + 100; /* report cached alarm enable state */ if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) alm->enabled = 1; return ret; } static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) { unsigned char alarm_data[ALL_TIME_REGS + 1]; int ret; ret = twl_rtc_alarm_irq_enable(dev, 0); if (ret) goto out; alarm_data[1] = bin2bcd(alm->time.tm_sec); alarm_data[2] = bin2bcd(alm->time.tm_min); alarm_data[3] = bin2bcd(alm->time.tm_hour); 
alarm_data[4] = bin2bcd(alm->time.tm_mday); alarm_data[5] = bin2bcd(alm->time.tm_mon + 1); alarm_data[6] = bin2bcd(alm->time.tm_year - 100); /* update all the alarm registers in one shot */ ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data, (rtc_reg_map[REG_ALARM_SECONDS_REG]), ALL_TIME_REGS); if (ret) { dev_err(dev, "rtc_set_alarm error %d\n", ret); goto out; } if (alm->enabled) ret = twl_rtc_alarm_irq_enable(dev, 1); out: return ret; } static irqreturn_t twl_rtc_interrupt(int irq, void *rtc) { unsigned long events; int ret = IRQ_NONE; int res; u8 rd_reg; res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); if (res) goto out; /* * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG. * only one (ALARM or RTC) interrupt source may be enabled * at time, we also could check our results * by reading RTS_INTERRUPTS_REGISTER[IT_TIMER,IT_ALARM] */ if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M) events = RTC_IRQF | RTC_AF; else events = RTC_IRQF | RTC_PF; res = twl_rtc_write_u8(BIT_RTC_STATUS_REG_ALARM_M, REG_RTC_STATUS_REG); if (res) goto out; if (twl_class_is_4030()) { /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1 * needs 2 reads to clear the interrupt. One read is done in * do_twl_pwrirq(). Doing the second read, to clear * the bit. * * FIXME the reason PWR_ISR1 needs an extra read is that * RTC_IF retriggered until we cleared REG_ALARM_M above. * But re-reading like this is a bad hack; by doing so we * risk wrongly clearing status for some other IRQ (losing * the interrupt). Be smarter about handling RTC_UF ... 
*/ res = twl_i2c_read_u8(TWL4030_MODULE_INT, &rd_reg, TWL4030_INT_PWR_ISR1); if (res) goto out; } /* Notify RTC core on event */ rtc_update_irq(rtc, 1, events); ret = IRQ_HANDLED; out: return ret; } static struct rtc_class_ops twl_rtc_ops = { .read_time = twl_rtc_read_time, .set_time = twl_rtc_set_time, .read_alarm = twl_rtc_read_alarm, .set_alarm = twl_rtc_set_alarm, .alarm_irq_enable = twl_rtc_alarm_irq_enable, }; /*----------------------------------------------------------------------*/ static int __devinit twl_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; int ret = -EINVAL; int irq = platform_get_irq(pdev, 0); u8 rd_reg; if (irq <= 0) goto out1; ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); if (ret < 0) goto out1; if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M) dev_warn(&pdev->dev, "Power up reset detected.\n"); if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M) dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n"); /* Clear RTC Power up reset and pending alarm interrupts */ ret = twl_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG); if (ret < 0) goto out1; if (twl_class_is_6030()) { twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, REG_INT_MSK_LINE_A); twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK, REG_INT_MSK_STS_A); } dev_info(&pdev->dev, "Enabling TWL-RTC\n"); ret = twl_rtc_write_u8(BIT_RTC_CTRL_REG_STOP_RTC_M, REG_RTC_CTRL_REG); if (ret < 0) goto out1; /* init cached IRQ enable bits */ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); if (ret < 0) goto out1; rtc = rtc_device_register(pdev->name, &pdev->dev, &twl_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { ret = PTR_ERR(rtc); dev_err(&pdev->dev, "can't register RTC device, err %ld\n", PTR_ERR(rtc)); goto out1; } ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt, IRQF_TRIGGER_RISING, dev_name(&rtc->dev), rtc); if (ret < 0) { dev_err(&pdev->dev, "IRQ is not free.\n"); goto out2; } platform_set_drvdata(pdev, rtc); return 0; out2: rtc_device_unregister(rtc); out1: return ret; } /* * 
Disable all TWL RTC module interrupts. * Sets status flag to free. */ static int __devexit twl_rtc_remove(struct platform_device *pdev) { /* leave rtc running, but disable irqs */ struct rtc_device *rtc = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); if (twl_class_is_6030()) { twl6030_interrupt_mask(TWL6030_RTC_INT_MASK, REG_INT_MSK_LINE_A); twl6030_interrupt_mask(TWL6030_RTC_INT_MASK, REG_INT_MSK_STS_A); } free_irq(irq, rtc); rtc_device_unregister(rtc); platform_set_drvdata(pdev, NULL); return 0; } static void twl_rtc_shutdown(struct platform_device *pdev) { /* mask timer interrupts, but leave alarm interrupts on to enable power-on when alarm is triggered */ mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); } #ifdef CONFIG_PM static unsigned char irqstat; static int twl_rtc_suspend(struct platform_device *pdev, pm_message_t state) { irqstat = rtc_irq_bits; mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M); return 0; } static int twl_rtc_resume(struct platform_device *pdev) { set_rtc_irq_bit(irqstat); return 0; } #else #define twl_rtc_suspend NULL #define twl_rtc_resume NULL #endif static const struct of_device_id twl_rtc_of_match[] = { {.compatible = "ti,twl4030-rtc", }, { }, }; MODULE_DEVICE_TABLE(of, twl_rtc_of_match); MODULE_ALIAS("platform:twl_rtc"); static struct platform_driver twl4030rtc_driver = { .probe = twl_rtc_probe, .remove = __devexit_p(twl_rtc_remove), .shutdown = twl_rtc_shutdown, .suspend = twl_rtc_suspend, .resume = twl_rtc_resume, .driver = { .owner = THIS_MODULE, .name = "twl_rtc", .of_match_table = twl_rtc_of_match, }, }; static int __init twl_rtc_init(void) { if (twl_class_is_4030()) rtc_reg_map = (u8 *) twl4030_rtc_reg_map; else rtc_reg_map = (u8 *) twl6030_rtc_reg_map; return platform_driver_register(&twl4030rtc_driver); } module_init(twl_rtc_init); static void __exit twl_rtc_exit(void) { 
platform_driver_unregister(&twl4030rtc_driver); } module_exit(twl_rtc_exit); MODULE_AUTHOR("Texas Instruments, MontaVista Software"); MODULE_LICENSE("GPL");
gpl-2.0
IndieBeto/android_kernel_motorola_msm8226
fs/posix_acl.c
4008
9470
/* * linux/fs/posix_acl.c * * Copyright (C) 2002 by Andreas Gruenbacher <a.gruenbacher@computer.org> * * Fixes from William Schumacher incorporated on 15 March 2001. * (Reported by Charles Bertsch, <CBertsch@microtest.com>). */ /* * This file contains generic functions for manipulating * POSIX 1003.1e draft standard 17 ACLs. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/posix_acl.h> #include <linux/export.h> #include <linux/errno.h> EXPORT_SYMBOL(posix_acl_init); EXPORT_SYMBOL(posix_acl_alloc); EXPORT_SYMBOL(posix_acl_valid); EXPORT_SYMBOL(posix_acl_equiv_mode); EXPORT_SYMBOL(posix_acl_from_mode); /* * Init a fresh posix_acl */ void posix_acl_init(struct posix_acl *acl, int count) { atomic_set(&acl->a_refcount, 1); acl->a_count = count; } /* * Allocate a new ACL with the specified number of entries. */ struct posix_acl * posix_acl_alloc(int count, gfp_t flags) { const size_t size = sizeof(struct posix_acl) + count * sizeof(struct posix_acl_entry); struct posix_acl *acl = kmalloc(size, flags); if (acl) posix_acl_init(acl, count); return acl; } /* * Clone an ACL. */ static struct posix_acl * posix_acl_clone(const struct posix_acl *acl, gfp_t flags) { struct posix_acl *clone = NULL; if (acl) { int size = sizeof(struct posix_acl) + acl->a_count * sizeof(struct posix_acl_entry); clone = kmemdup(acl, size, flags); if (clone) atomic_set(&clone->a_refcount, 1); } return clone; } /* * Check if an acl is valid. Returns 0 if it is, or -E... otherwise. 
*/ int posix_acl_valid(const struct posix_acl *acl) { const struct posix_acl_entry *pa, *pe; int state = ACL_USER_OBJ; unsigned int id = 0; /* keep gcc happy */ int needs_mask = 0; FOREACH_ACL_ENTRY(pa, acl, pe) { if (pa->e_perm & ~(ACL_READ|ACL_WRITE|ACL_EXECUTE)) return -EINVAL; switch (pa->e_tag) { case ACL_USER_OBJ: if (state == ACL_USER_OBJ) { id = 0; state = ACL_USER; break; } return -EINVAL; case ACL_USER: if (state != ACL_USER) return -EINVAL; if (pa->e_id == ACL_UNDEFINED_ID || pa->e_id < id) return -EINVAL; id = pa->e_id + 1; needs_mask = 1; break; case ACL_GROUP_OBJ: if (state == ACL_USER) { id = 0; state = ACL_GROUP; break; } return -EINVAL; case ACL_GROUP: if (state != ACL_GROUP) return -EINVAL; if (pa->e_id == ACL_UNDEFINED_ID || pa->e_id < id) return -EINVAL; id = pa->e_id + 1; needs_mask = 1; break; case ACL_MASK: if (state != ACL_GROUP) return -EINVAL; state = ACL_OTHER; break; case ACL_OTHER: if (state == ACL_OTHER || (state == ACL_GROUP && !needs_mask)) { state = 0; break; } return -EINVAL; default: return -EINVAL; } } if (state == 0) return 0; return -EINVAL; } /* * Returns 0 if the acl can be exactly represented in the traditional * file mode permission bits, or else 1. Returns -E... on error. */ int posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p) { const struct posix_acl_entry *pa, *pe; umode_t mode = 0; int not_equiv = 0; FOREACH_ACL_ENTRY(pa, acl, pe) { switch (pa->e_tag) { case ACL_USER_OBJ: mode |= (pa->e_perm & S_IRWXO) << 6; break; case ACL_GROUP_OBJ: mode |= (pa->e_perm & S_IRWXO) << 3; break; case ACL_OTHER: mode |= pa->e_perm & S_IRWXO; break; case ACL_MASK: mode = (mode & ~S_IRWXG) | ((pa->e_perm & S_IRWXO) << 3); not_equiv = 1; break; case ACL_USER: case ACL_GROUP: not_equiv = 1; break; default: return -EINVAL; } } if (mode_p) *mode_p = (*mode_p & ~S_IRWXUGO) | mode; return not_equiv; } /* * Create an ACL representing the file mode permission bits of an inode. 
*/ struct posix_acl * posix_acl_from_mode(umode_t mode, gfp_t flags) { struct posix_acl *acl = posix_acl_alloc(3, flags); if (!acl) return ERR_PTR(-ENOMEM); acl->a_entries[0].e_tag = ACL_USER_OBJ; acl->a_entries[0].e_id = ACL_UNDEFINED_ID; acl->a_entries[0].e_perm = (mode & S_IRWXU) >> 6; acl->a_entries[1].e_tag = ACL_GROUP_OBJ; acl->a_entries[1].e_id = ACL_UNDEFINED_ID; acl->a_entries[1].e_perm = (mode & S_IRWXG) >> 3; acl->a_entries[2].e_tag = ACL_OTHER; acl->a_entries[2].e_id = ACL_UNDEFINED_ID; acl->a_entries[2].e_perm = (mode & S_IRWXO); return acl; } /* * Return 0 if current is granted want access to the inode * by the acl. Returns -E... otherwise. */ int posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want) { const struct posix_acl_entry *pa, *pe, *mask_obj; int found = 0; want &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK; FOREACH_ACL_ENTRY(pa, acl, pe) { switch(pa->e_tag) { case ACL_USER_OBJ: /* (May have been checked already) */ if (inode->i_uid == current_fsuid()) goto check_perm; break; case ACL_USER: if (pa->e_id == current_fsuid()) goto mask; break; case ACL_GROUP_OBJ: if (in_group_p(inode->i_gid)) { found = 1; if ((pa->e_perm & want) == want) goto mask; } break; case ACL_GROUP: if (in_group_p(pa->e_id)) { found = 1; if ((pa->e_perm & want) == want) goto mask; } break; case ACL_MASK: break; case ACL_OTHER: if (found) return -EACCES; else goto check_perm; default: return -EIO; } } return -EIO; mask: for (mask_obj = pa+1; mask_obj != pe; mask_obj++) { if (mask_obj->e_tag == ACL_MASK) { if ((pa->e_perm & mask_obj->e_perm & want) == want) return 0; return -EACCES; } } check_perm: if ((pa->e_perm & want) == want) return 0; return -EACCES; } /* * Modify acl when creating a new inode. The caller must ensure the acl is * only referenced once. * * mode_p initially must contain the mode parameter to the open() / creat() * system calls. All permissions that are not granted by the acl are removed. 
* The permissions in the acl are changed to reflect the mode_p parameter. */ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p) { struct posix_acl_entry *pa, *pe; struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL; umode_t mode = *mode_p; int not_equiv = 0; /* assert(atomic_read(acl->a_refcount) == 1); */ FOREACH_ACL_ENTRY(pa, acl, pe) { switch(pa->e_tag) { case ACL_USER_OBJ: pa->e_perm &= (mode >> 6) | ~S_IRWXO; mode &= (pa->e_perm << 6) | ~S_IRWXU; break; case ACL_USER: case ACL_GROUP: not_equiv = 1; break; case ACL_GROUP_OBJ: group_obj = pa; break; case ACL_OTHER: pa->e_perm &= mode | ~S_IRWXO; mode &= pa->e_perm | ~S_IRWXO; break; case ACL_MASK: mask_obj = pa; not_equiv = 1; break; default: return -EIO; } } if (mask_obj) { mask_obj->e_perm &= (mode >> 3) | ~S_IRWXO; mode &= (mask_obj->e_perm << 3) | ~S_IRWXG; } else { if (!group_obj) return -EIO; group_obj->e_perm &= (mode >> 3) | ~S_IRWXO; mode &= (group_obj->e_perm << 3) | ~S_IRWXG; } *mode_p = (*mode_p & ~S_IRWXUGO) | mode; return not_equiv; } /* * Modify the ACL for the chmod syscall. 
*/ static int posix_acl_chmod_masq(struct posix_acl *acl, umode_t mode) { struct posix_acl_entry *group_obj = NULL, *mask_obj = NULL; struct posix_acl_entry *pa, *pe; /* assert(atomic_read(acl->a_refcount) == 1); */ FOREACH_ACL_ENTRY(pa, acl, pe) { switch(pa->e_tag) { case ACL_USER_OBJ: pa->e_perm = (mode & S_IRWXU) >> 6; break; case ACL_USER: case ACL_GROUP: break; case ACL_GROUP_OBJ: group_obj = pa; break; case ACL_MASK: mask_obj = pa; break; case ACL_OTHER: pa->e_perm = (mode & S_IRWXO); break; default: return -EIO; } } if (mask_obj) { mask_obj->e_perm = (mode & S_IRWXG) >> 3; } else { if (!group_obj) return -EIO; group_obj->e_perm = (mode & S_IRWXG) >> 3; } return 0; } int posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p) { struct posix_acl *clone = posix_acl_clone(*acl, gfp); int err = -ENOMEM; if (clone) { err = posix_acl_create_masq(clone, mode_p); if (err < 0) { posix_acl_release(clone); clone = NULL; } } posix_acl_release(*acl); *acl = clone; return err; } EXPORT_SYMBOL(posix_acl_create); int posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode) { struct posix_acl *clone = posix_acl_clone(*acl, gfp); int err = -ENOMEM; if (clone) { err = posix_acl_chmod_masq(clone, mode); if (err) { posix_acl_release(clone); clone = NULL; } } posix_acl_release(*acl); *acl = clone; return err; } EXPORT_SYMBOL(posix_acl_chmod);
gpl-2.0
karandpr/Doppler3ICS
arch/mips/pci/pci-tx4927.c
4520
2655
/* * Based on linux/arch/mips/txx9/rbtx4938/setup.c, * and RBTX49xx patch from CELF patch archive. * * Copyright 2001, 2003-2005 MontaVista Software Inc. * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007 * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <asm/txx9/generic.h> #include <asm/txx9/tx4927.h> int __init tx4927_report_pciclk(void) { int pciclk = 0; printk(KERN_INFO "PCIC --%s PCICLK:", (__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66) ? " PCI66" : ""); if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) { u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg); switch ((unsigned long)ccfg & TX4927_CCFG_PCIDIVMODE_MASK) { case TX4927_CCFG_PCIDIVMODE_2_5: pciclk = txx9_cpu_clock * 2 / 5; break; case TX4927_CCFG_PCIDIVMODE_3: pciclk = txx9_cpu_clock / 3; break; case TX4927_CCFG_PCIDIVMODE_5: pciclk = txx9_cpu_clock / 5; break; case TX4927_CCFG_PCIDIVMODE_6: pciclk = txx9_cpu_clock / 6; break; } printk("Internal(%u.%uMHz)", (pciclk + 50000) / 1000000, ((pciclk + 50000) / 100000) % 10); } else { printk("External"); pciclk = -1; } printk("\n"); return pciclk; } int __init tx4927_pciclk66_setup(void) { int pciclk; /* Assert M66EN */ tx4927_ccfg_set(TX4927_CCFG_PCI66); /* Double PCICLK (if possible) */ if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) { unsigned int pcidivmode = 0; u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg); pcidivmode = (unsigned long)ccfg & TX4927_CCFG_PCIDIVMODE_MASK; switch (pcidivmode) { case TX4927_CCFG_PCIDIVMODE_5: case TX4927_CCFG_PCIDIVMODE_2_5: pcidivmode = TX4927_CCFG_PCIDIVMODE_2_5; pciclk = txx9_cpu_clock * 2 / 5; break; case TX4927_CCFG_PCIDIVMODE_6: case TX4927_CCFG_PCIDIVMODE_3: default: pcidivmode = 
TX4927_CCFG_PCIDIVMODE_3; pciclk = txx9_cpu_clock / 3; } tx4927_ccfg_change(TX4927_CCFG_PCIDIVMODE_MASK, pcidivmode); printk(KERN_DEBUG "PCICLK: ccfg:%08lx\n", (unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg)); } else pciclk = -1; return pciclk; } void __init tx4927_setup_pcierr_irq(void) { if (request_irq(TXX9_IRQ_BASE + TX4927_IR_PCIERR, tx4927_pcierr_interrupt, IRQF_DISABLED, "PCI error", (void *)TX4927_PCIC_REG)) printk(KERN_WARNING "Failed to request irq for PCIERR\n"); }
gpl-2.0
tbalden/android_kernel_htc_m7-sense4.3
arch/mips/kernel/kprobes.c
4776
17276
/* * Kernel Probes (KProbes) * arch/mips/kernel/kprobes.c * * Copyright 2006 Sony Corp. * Copyright 2010 Cavium Networks * * Some portions copied from the powerpc version. * * Copyright (C) IBM Corporation, 2002, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kprobes.h> #include <linux/preempt.h> #include <linux/uaccess.h> #include <linux/kdebug.h> #include <linux/slab.h> #include <asm/ptrace.h> #include <asm/branch.h> #include <asm/break.h> #include <asm/inst.h> static const union mips_instruction breakpoint_insn = { .b_format = { .opcode = spec_op, .code = BRK_KPROBE_BP, .func = break_op } }; static const union mips_instruction breakpoint2_insn = { .b_format = { .opcode = spec_op, .code = BRK_KPROBE_SSTEPBP, .func = break_op } }; DEFINE_PER_CPU(struct kprobe *, current_kprobe); DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static int __kprobes insn_has_delayslot(union mips_instruction insn) { switch (insn.i_format.opcode) { /* * This group contains: * jr and jalr are in r_format format. */ case spec_op: switch (insn.r_format.func) { case jr_op: case jalr_op: break; default: goto insn_ok; } /* * This group contains: * bltz_op, bgez_op, bltzl_op, bgezl_op, * bltzal_op, bgezal_op, bltzall_op, bgezall_op. */ case bcond_op: /* * These are unconditional and in j_format. */ case jal_op: case j_op: /* * These are conditional and in i_format. 
*/ case beq_op: case beql_op: case bne_op: case bnel_op: case blez_op: case blezl_op: case bgtz_op: case bgtzl_op: /* * These are the FPA/cp1 branch instructions. */ case cop1_op: #ifdef CONFIG_CPU_CAVIUM_OCTEON case lwc2_op: /* This is bbit0 on Octeon */ case ldc2_op: /* This is bbit032 on Octeon */ case swc2_op: /* This is bbit1 on Octeon */ case sdc2_op: /* This is bbit132 on Octeon */ #endif return 1; default: break; } insn_ok: return 0; } /* * insn_has_ll_or_sc function checks whether instruction is ll or sc * one; putting breakpoint on top of atomic ll/sc pair is bad idea; * so we need to prevent it and refuse kprobes insertion for such * instructions; cannot do much about breakpoint in the middle of * ll/sc pair; it is upto user to avoid those places */ static int __kprobes insn_has_ll_or_sc(union mips_instruction insn) { int ret = 0; switch (insn.i_format.opcode) { case ll_op: case lld_op: case sc_op: case scd_op: ret = 1; break; default: break; } return ret; } int __kprobes arch_prepare_kprobe(struct kprobe *p) { union mips_instruction insn; union mips_instruction prev_insn; int ret = 0; insn = p->addr[0]; if (insn_has_ll_or_sc(insn)) { pr_notice("Kprobes for ll and sc instructions are not" "supported\n"); ret = -EINVAL; goto out; } if ((probe_kernel_read(&prev_insn, p->addr - 1, sizeof(mips_instruction)) == 0) && insn_has_delayslot(prev_insn)) { pr_notice("Kprobes for branch delayslot are not supported\n"); ret = -EINVAL; goto out; } /* insn: must be on special executable page on mips. */ p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) { ret = -ENOMEM; goto out; } /* * In the kprobe->ainsn.insn[] array we store the original * instruction at index zero and a break trap instruction at * index one. * * On MIPS arch if the instruction at probed address is a * branch instruction, we need to execute the instruction at * Branch Delayslot (BD) at the time of probe hit. 
As MIPS also * doesn't have single stepping support, the BD instruction can * not be executed in-line and it would be executed on SSOL slot * using a normal breakpoint instruction in the next slot. * So, read the instruction and save it for later execution. */ if (insn_has_delayslot(insn)) memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t)); else memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); p->ainsn.insn[1] = breakpoint2_insn; p->opcode = *p->addr; out: return ret; } void __kprobes arch_arm_kprobe(struct kprobe *p) { *p->addr = breakpoint_insn; flush_insn_slot(p); } void __kprobes arch_disarm_kprobe(struct kprobe *p) { *p->addr = p->opcode; flush_insn_slot(p); } void __kprobes arch_remove_kprobe(struct kprobe *p) { free_insn_slot(p->ainsn.insn, 0); } static void save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR; kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR; kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc; } static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; kcb->kprobe_status = kcb->prev_kprobe.status; kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR; kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR; kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc; } static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { __get_cpu_var(current_kprobe) = p; kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE); kcb->kprobe_saved_epc = regs->cp0_epc; } /** * evaluate_branch_instrucion - * * Evaluate the branch instruction at probed address during probe hit. The * result of evaluation would be the updated epc. The insturction in delayslot * would actually be single stepped using a normal breakpoint) on SSOL slot. 
* * The result is also saved in the kprobe control block for later use, * in case we need to execute the delayslot instruction. The latter will be * false for NOP instruction in dealyslot and the branch-likely instructions * when the branch is taken. And for those cases we set a flag as * SKIP_DELAYSLOT in the kprobe control block */ static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { union mips_instruction insn = p->opcode; long epc; int ret = 0; epc = regs->cp0_epc; if (epc & 3) goto unaligned; if (p->ainsn.insn->word == 0) kcb->flags |= SKIP_DELAYSLOT; else kcb->flags &= ~SKIP_DELAYSLOT; ret = __compute_return_epc_for_insn(regs, insn); if (ret < 0) return ret; if (ret == BRANCH_LIKELY_TAKEN) kcb->flags |= SKIP_DELAYSLOT; kcb->target_epc = regs->cp0_epc; return 0; unaligned: pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); force_sig(SIGBUS, current); return -EFAULT; } static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { int ret = 0; regs->cp0_status &= ~ST0_IE; /* single step inline if the instruction is a break */ if (p->opcode.word == breakpoint_insn.word || p->opcode.word == breakpoint2_insn.word) regs->cp0_epc = (unsigned long)p->addr; else if (insn_has_delayslot(p->opcode)) { ret = evaluate_branch_instruction(p, regs, kcb); if (ret < 0) { pr_notice("Kprobes: Error in evaluating branch\n"); return; } } regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; } /* * Called after single-stepping. p->addr is the address of the * instruction whose first byte has been replaced by the "break 0" * instruction. To avoid the SMP problems that can occur when we * temporarily put back the original opcode to single-step, we * single-stepped a copy of the instruction. The address of this * copy is p->ainsn.insn. * * This function prepares to return from the post-single-step * breakpoint trap. In case of branch instructions, the target * epc to be restored. 
*/ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { if (insn_has_delayslot(p->opcode)) regs->cp0_epc = kcb->target_epc; else { unsigned long orig_epc = kcb->kprobe_saved_epc; regs->cp0_epc = orig_epc + 4; } } static int __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p; int ret = 0; kprobe_opcode_t *addr; struct kprobe_ctlblk *kcb; addr = (kprobe_opcode_t *) regs->cp0_epc; /* * We don't want to be preempted for the entire * duration of kprobe processing */ preempt_disable(); kcb = get_kprobe_ctlblk(); /* Check we're not actually recursing */ if (kprobe_running()) { p = get_kprobe(addr); if (p) { if (kcb->kprobe_status == KPROBE_HIT_SS && p->ainsn.insn->word == breakpoint_insn.word) { regs->cp0_status &= ~ST0_IE; regs->cp0_status |= kcb->kprobe_saved_SR; goto no_kprobe; } /* * We have reentered the kprobe_handler(), since * another probe was hit while within the handler. * We here save the original kprobes variables and * just single step on the instruction of the new probe * without calling any user handlers. */ save_previous_kprobe(kcb); set_current_kprobe(p, regs, kcb); kprobes_inc_nmissed_count(p); prepare_singlestep(p, regs, kcb); kcb->kprobe_status = KPROBE_REENTER; if (kcb->flags & SKIP_DELAYSLOT) { resume_execution(p, regs, kcb); restore_previous_kprobe(kcb); preempt_enable_no_resched(); } return 1; } else { if (addr->word != breakpoint_insn.word) { /* * The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ ret = 1; goto no_kprobe; } p = __get_cpu_var(current_kprobe); if (p->break_handler && p->break_handler(p, regs)) goto ss_probe; } goto no_kprobe; } p = get_kprobe(addr); if (!p) { if (addr->word != breakpoint_insn.word) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed * either a probepoint or a debugger breakpoint * at this address. 
In either case, no further * handling of this interrupt is appropriate. */ ret = 1; } /* Not one of ours: let kernel handle it */ goto no_kprobe; } set_current_kprobe(p, regs, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (p->pre_handler && p->pre_handler(p, regs)) { /* handler has already set things up, so skip ss setup */ return 1; } ss_probe: prepare_singlestep(p, regs, kcb); if (kcb->flags & SKIP_DELAYSLOT) { kcb->kprobe_status = KPROBE_HIT_SSDONE; if (p->post_handler) p->post_handler(p, regs, 0); resume_execution(p, regs, kcb); preempt_enable_no_resched(); } else kcb->kprobe_status = KPROBE_HIT_SS; return 1; no_kprobe: preempt_enable_no_resched(); return ret; } static inline int post_kprobe_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (!cur) return 0; if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } resume_execution(cur, regs, kcb); regs->cp0_status |= kcb->kprobe_saved_SR; /* Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); goto out; } reset_current_kprobe(); out: preempt_enable_no_resched(); return 1; } static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) return 1; if (kcb->kprobe_status & KPROBE_HIT_SS) { resume_execution(cur, regs, kcb); regs->cp0_status |= kcb->kprobe_old_SR; reset_current_kprobe(); preempt_enable_no_resched(); } return 0; } /* * Wrapper routine for handling exceptions. 
*/ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; switch (val) { case DIE_BREAK: if (kprobe_handler(args->regs)) ret = NOTIFY_STOP; break; case DIE_SSTEPBP: if (post_kprobe_handler(args->regs)) ret = NOTIFY_STOP; break; case DIE_PAGE_FAULT: /* kprobe_running() needs smp_processor_id() */ preempt_disable(); if (kprobe_running() && kprobe_fault_handler(args->regs, args->trapnr)) ret = NOTIFY_STOP; preempt_enable(); break; default: break; } return ret; } int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); kcb->jprobe_saved_regs = *regs; kcb->jprobe_saved_sp = regs->regs[29]; memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp, MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); regs->cp0_epc = (unsigned long)(jp->entry); return 1; } /* Defined in the inline asm below. */ void jprobe_return_end(void); void __kprobes jprobe_return(void) { /* Assembler quirk necessitates this '0,code' business. 
*/ asm volatile( "break 0,%0\n\t" ".globl jprobe_return_end\n" "jprobe_return_end:\n" : : "n" (BRK_KPROBE_BP) : "memory"); } int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (regs->cp0_epc >= (unsigned long)jprobe_return && regs->cp0_epc <= (unsigned long)jprobe_return_end) { *regs = kcb->jprobe_saved_regs; memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack, MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp)); preempt_enable_no_resched(); return 1; } return 0; } /* * Function return probe trampoline: * - init_kprobes() establishes a probepoint here * - When the probed function returns, this probe causes the * handlers to fire */ static void __used kretprobe_trampoline_holder(void) { asm volatile( ".set push\n\t" /* Keep the assembler from reordering and placing JR here. */ ".set noreorder\n\t" "nop\n\t" ".global kretprobe_trampoline\n" "kretprobe_trampoline:\n\t" "nop\n\t" ".set pop" : : : "memory"); } void kretprobe_trampoline(void); void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *) regs->regs[31]; /* Replace the return addr with trampoline addr */ regs->regs[31] = (unsigned long)kretprobe_trampoline; } /* * Called when the probe at kretprobe trampoline is hit */ static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; struct hlist_node *node, *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = (unsigned long)kretprobe_trampoline; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); /* * It is possible to have multiple instances associated with a given * task either because an multiple functions in the call path * have a return probe installed on them, and/or more than one return * return probe was registered for a target function. 
* * We can handle this because: * - instances are always inserted at the head of the list * - when multiple return probes are registered for the same * function, the first instance's ret_addr will point to the * real return address, and all the rest will point to * kretprobe_trampoline */ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; if (ri->rp && ri->rp->handler) ri->rp->handler(ri, regs); orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) /* * This is the real return address. Any other * instances associated with this task are for * other calls deeper on the call stack */ break; } kretprobe_assert(ri, orig_ret_address, trampoline_address); instruction_pointer(regs) = orig_ret_address; reset_current_kprobe(); kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler * to run (and have re-enabled preemption) */ return 1; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) return 1; return 0; } static struct kprobe trampoline_p = { .addr = (kprobe_opcode_t *)kretprobe_trampoline, .pre_handler = trampoline_probe_handler }; int __init arch_init_kprobes(void) { return register_kprobe(&trampoline_p); }
gpl-2.0
neykov/chipidea-device-driver
drivers/video/backlight/aat2870_bl.c
5032
6205
/* * linux/drivers/video/backlight/aat2870_bl.c * * Copyright (c) 2011, NVIDIA Corporation. * Author: Jin Park <jinyoungp@nvidia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/mfd/aat2870.h> struct aat2870_bl_driver_data { struct platform_device *pdev; struct backlight_device *bd; int channels; int max_current; int brightness; /* current brightness */ }; static inline int aat2870_brightness(struct aat2870_bl_driver_data *aat2870_bl, int brightness) { struct backlight_device *bd = aat2870_bl->bd; int val; val = brightness * (aat2870_bl->max_current - 1); val /= bd->props.max_brightness; return val; } static inline int aat2870_bl_enable(struct aat2870_bl_driver_data *aat2870_bl) { struct aat2870_data *aat2870 = dev_get_drvdata(aat2870_bl->pdev->dev.parent); return aat2870->write(aat2870, AAT2870_BL_CH_EN, (u8)aat2870_bl->channels); } static inline int aat2870_bl_disable(struct aat2870_bl_driver_data *aat2870_bl) { struct aat2870_data *aat2870 = dev_get_drvdata(aat2870_bl->pdev->dev.parent); return aat2870->write(aat2870, AAT2870_BL_CH_EN, 0x0); } static int aat2870_bl_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static int 
aat2870_bl_update_status(struct backlight_device *bd) { struct aat2870_bl_driver_data *aat2870_bl = dev_get_drvdata(&bd->dev); struct aat2870_data *aat2870 = dev_get_drvdata(aat2870_bl->pdev->dev.parent); int brightness = bd->props.brightness; int ret; if ((brightness < 0) || (bd->props.max_brightness < brightness)) { dev_err(&bd->dev, "invalid brightness, %d\n", brightness); return -EINVAL; } dev_dbg(&bd->dev, "brightness=%d, power=%d, state=%d\n", bd->props.brightness, bd->props.power, bd->props.state); if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.state & BL_CORE_FBBLANK) || (bd->props.state & BL_CORE_SUSPENDED)) brightness = 0; ret = aat2870->write(aat2870, AAT2870_BLM, (u8)aat2870_brightness(aat2870_bl, brightness)); if (ret < 0) return ret; if (brightness == 0) { ret = aat2870_bl_disable(aat2870_bl); if (ret < 0) return ret; } else if (aat2870_bl->brightness == 0) { ret = aat2870_bl_enable(aat2870_bl); if (ret < 0) return ret; } aat2870_bl->brightness = brightness; return 0; } static int aat2870_bl_check_fb(struct backlight_device *bd, struct fb_info *fi) { return 1; } static const struct backlight_ops aat2870_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .get_brightness = aat2870_bl_get_brightness, .update_status = aat2870_bl_update_status, .check_fb = aat2870_bl_check_fb, }; static int aat2870_bl_probe(struct platform_device *pdev) { struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data; struct aat2870_bl_driver_data *aat2870_bl; struct backlight_device *bd; struct backlight_properties props; int ret = 0; if (!pdata) { dev_err(&pdev->dev, "No platform data\n"); ret = -ENXIO; goto out; } if (pdev->id != AAT2870_ID_BL) { dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id); ret = -EINVAL; goto out; } aat2870_bl = devm_kzalloc(&pdev->dev, sizeof(struct aat2870_bl_driver_data), GFP_KERNEL); if (!aat2870_bl) { dev_err(&pdev->dev, "Failed to allocate memory for aat2870 backlight\n"); ret = -ENOMEM; goto out; } memset(&props, 0, 
sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; bd = backlight_device_register("aat2870-backlight", &pdev->dev, aat2870_bl, &aat2870_bl_ops, &props); if (IS_ERR(bd)) { dev_err(&pdev->dev, "Failed allocate memory for backlight device\n"); ret = PTR_ERR(bd); goto out; } aat2870_bl->pdev = pdev; platform_set_drvdata(pdev, aat2870_bl); aat2870_bl->bd = bd; if (pdata->channels > 0) aat2870_bl->channels = pdata->channels; else aat2870_bl->channels = AAT2870_BL_CH_ALL; if (pdata->max_current > 0) aat2870_bl->max_current = pdata->max_current; else aat2870_bl->max_current = AAT2870_CURRENT_27_9; if (pdata->max_brightness > 0) bd->props.max_brightness = pdata->max_brightness; else bd->props.max_brightness = 255; aat2870_bl->brightness = 0; bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = bd->props.max_brightness; ret = aat2870_bl_update_status(bd); if (ret < 0) { dev_err(&pdev->dev, "Failed to initialize\n"); goto out_bl_dev_unregister; } return 0; out_bl_dev_unregister: backlight_device_unregister(bd); out: return ret; } static int aat2870_bl_remove(struct platform_device *pdev) { struct aat2870_bl_driver_data *aat2870_bl = platform_get_drvdata(pdev); struct backlight_device *bd = aat2870_bl->bd; bd->props.power = FB_BLANK_POWERDOWN; bd->props.brightness = 0; backlight_update_status(bd); backlight_device_unregister(bd); return 0; } static struct platform_driver aat2870_bl_driver = { .driver = { .name = "aat2870-backlight", .owner = THIS_MODULE, }, .probe = aat2870_bl_probe, .remove = aat2870_bl_remove, }; static int __init aat2870_bl_init(void) { return platform_driver_register(&aat2870_bl_driver); } subsys_initcall(aat2870_bl_init); static void __exit aat2870_bl_exit(void) { platform_driver_unregister(&aat2870_bl_driver); } module_exit(aat2870_bl_exit); MODULE_DESCRIPTION("AnalogicTech AAT2870 Backlight"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
gpl-2.0
javelinanddart/Canuck
drivers/media/video/davinci/dm644x_ccdc.c
5032
32047
/* * Copyright (C) 2006-2009 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * CCDC hardware module for DM6446 * ------------------------------ * * This module is for configuring CCD controller of DM6446 VPFE to capture * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules * such as Defect Pixel Correction, Color Space Conversion etc to * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This * module also allows application to configure individual * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header * files. The setparams() API is called by vpfe_capture driver * to configure module parameters. This file is named DM644x so that other * variants such DM6443 may be supported using the same module. 
* * TODO: Test Raw bayer parameter settings and bayer capture * Split module parameter structure to module specific ioctl structs * investigate if enum used for user space type definition * to be replaced by #defines or integer */ #include <linux/platform_device.h> #include <linux/uaccess.h> #include <linux/videodev2.h> #include <linux/gfp.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/module.h> #include <media/davinci/dm644x_ccdc.h> #include <media/davinci/vpss.h> #include "dm644x_ccdc_regs.h" #include "ccdc_hw_device.h" MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CCDC Driver for DM6446"); MODULE_AUTHOR("Texas Instruments"); static struct ccdc_oper_config { struct device *dev; /* CCDC interface type */ enum vpfe_hw_if_type if_type; /* Raw Bayer configuration */ struct ccdc_params_raw bayer; /* YCbCr configuration */ struct ccdc_params_ycbcr ycbcr; /* Master clock */ struct clk *mclk; /* slave clock */ struct clk *sclk; /* ccdc base address */ void __iomem *base_addr; } ccdc_cfg = { /* Raw configurations */ .bayer = { .pix_fmt = CCDC_PIXFMT_RAW, .frm_fmt = CCDC_FRMFMT_PROGRESSIVE, .win = CCDC_WIN_VGA, .fid_pol = VPFE_PINPOL_POSITIVE, .vd_pol = VPFE_PINPOL_POSITIVE, .hd_pol = VPFE_PINPOL_POSITIVE, .config_params = { .data_sz = CCDC_DATA_10BITS, }, }, .ycbcr = { .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT, .frm_fmt = CCDC_FRMFMT_INTERLACED, .win = CCDC_WIN_PAL, .fid_pol = VPFE_PINPOL_POSITIVE, .vd_pol = VPFE_PINPOL_POSITIVE, .hd_pol = VPFE_PINPOL_POSITIVE, .bt656_enable = 1, .pix_order = CCDC_PIXORDER_CBYCRY, .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED }, }; #define CCDC_MAX_RAW_YUV_FORMATS 2 /* Raw Bayer formats */ static u32 ccdc_raw_bayer_pix_formats[] = {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16}; /* Raw YUV formats */ static u32 ccdc_raw_yuv_pix_formats[] = {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV}; /* CCDC Save/Restore context */ static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)]; /* register access routines */ static inline u32 regr(u32 offset) { return 
__raw_readl(ccdc_cfg.base_addr + offset); } static inline void regw(u32 val, u32 offset) { __raw_writel(val, ccdc_cfg.base_addr + offset); } static void ccdc_enable(int flag) { regw(flag, CCDC_PCR); } static void ccdc_enable_vport(int flag) { if (flag) /* enable video port */ regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG); else regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG); } /* * ccdc_setwin() * This function will configure the window size * to be capture in CCDC reg */ void ccdc_setwin(struct v4l2_rect *image_win, enum ccdc_frmfmt frm_fmt, int ppc) { int horz_start, horz_nr_pixels; int vert_start, vert_nr_lines; int val = 0, mid_img = 0; dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin..."); /* * ppc - per pixel count. indicates how many pixels per cell * output to SDRAM. example, for ycbcr, it is one y and one c, so 2. * raw capture this is 1 */ horz_start = image_win->left << (ppc - 1); horz_nr_pixels = (image_win->width << (ppc - 1)) - 1; regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels, CCDC_HORZ_INFO); vert_start = image_win->top; if (frm_fmt == CCDC_FRMFMT_INTERLACED) { vert_nr_lines = (image_win->height >> 1) - 1; vert_start >>= 1; /* Since first line doesn't have any data */ vert_start += 1; /* configure VDINT0 */ val = (vert_start << CCDC_VDINT_VDINT0_SHIFT); regw(val, CCDC_VDINT); } else { /* Since first line doesn't have any data */ vert_start += 1; vert_nr_lines = image_win->height - 1; /* * configure VDINT0 and VDINT1. 
VDINT1 will be at half * of image height */ mid_img = vert_start + (image_win->height / 2); val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) | (mid_img & CCDC_VDINT_VDINT1_MASK); regw(val, CCDC_VDINT); } regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start, CCDC_VERT_START); regw(vert_nr_lines, CCDC_VERT_LINES); dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin..."); } static void ccdc_readregs(void) { unsigned int val = 0; val = regr(CCDC_ALAW); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to ALAW...\n", val); val = regr(CCDC_CLAMP); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to CLAMP...\n", val); val = regr(CCDC_DCSUB); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to DCSUB...\n", val); val = regr(CCDC_BLKCMP); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to BLKCMP...\n", val); val = regr(CCDC_FPC_ADDR); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC_ADDR...\n", val); val = regr(CCDC_FPC); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC...\n", val); val = regr(CCDC_FMTCFG); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMTCFG...\n", val); val = regr(CCDC_COLPTN); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to COLPTN...\n", val); val = regr(CCDC_FMT_HORZ); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_HORZ...\n", val); val = regr(CCDC_FMT_VERT); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_VERT...\n", val); val = regr(CCDC_HSIZE_OFF); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HSIZE_OFF...\n", val); val = regr(CCDC_SDOFST); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SDOFST...\n", val); val = regr(CCDC_VP_OUT); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VP_OUT...\n", val); val = regr(CCDC_SYN_MODE); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SYN_MODE...\n", val); val = regr(CCDC_HORZ_INFO); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HORZ_INFO...\n", val); val = regr(CCDC_VERT_START); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_START...\n", val); val = regr(CCDC_VERT_LINES); dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val); } static int 
validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) { if (ccdcparam->alaw.enable) { if ((ccdcparam->alaw.gama_wd > CCDC_GAMMA_BITS_09_0) || (ccdcparam->alaw.gama_wd < CCDC_GAMMA_BITS_15_6) || (ccdcparam->alaw.gama_wd < ccdcparam->data_sz)) { dev_dbg(ccdc_cfg.dev, "\nInvalid data line select"); return -1; } } return 0; } static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params) { struct ccdc_config_params_raw *config_params = &ccdc_cfg.bayer.config_params; unsigned int *fpc_virtaddr = NULL; unsigned int *fpc_physaddr = NULL; memcpy(config_params, raw_params, sizeof(*raw_params)); /* * allocate memory for fault pixel table and copy the user * values to the table */ if (!config_params->fault_pxl.enable) return 0; fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; fpc_virtaddr = (unsigned int *)phys_to_virt( (unsigned long)fpc_physaddr); /* * Allocate memory for FPC table if current * FPC table buffer is not big enough to * accommodate FPC Number requested */ if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) { if (fpc_physaddr != NULL) { free_pages((unsigned long)fpc_physaddr, get_order (config_params->fault_pxl.fp_num * FP_NUM_BYTES)); } /* Allocate memory for FPC table */ fpc_virtaddr = (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA, get_order(raw_params-> fault_pxl.fp_num * FP_NUM_BYTES)); if (fpc_virtaddr == NULL) { dev_dbg(ccdc_cfg.dev, "\nUnable to allocate memory for FPC"); return -EFAULT; } fpc_physaddr = (unsigned int *)virt_to_phys((void *)fpc_virtaddr); } /* Copy number of fault pixels and FPC table */ config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num; if (copy_from_user(fpc_virtaddr, (void __user *)raw_params->fault_pxl.fpc_table_addr, config_params->fault_pxl.fp_num * FP_NUM_BYTES)) { dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed"); return -EFAULT; } config_params->fault_pxl.fpc_table_addr = (unsigned int)fpc_physaddr; return 0; } static int ccdc_close(struct device 
*dev) { struct ccdc_config_params_raw *config_params = &ccdc_cfg.bayer.config_params; unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL; fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; if (fpc_physaddr != NULL) { fpc_virtaddr = (unsigned int *) phys_to_virt((unsigned long)fpc_physaddr); free_pages((unsigned long)fpc_virtaddr, get_order(config_params->fault_pxl.fp_num * FP_NUM_BYTES)); } return 0; } /* * ccdc_restore_defaults() * This function will write defaults to all CCDC registers */ static void ccdc_restore_defaults(void) { int i; /* disable CCDC */ ccdc_enable(0); /* set all registers to default value */ for (i = 4; i <= 0x94; i += 4) regw(0, i); regw(CCDC_NO_CULLING, CCDC_CULLING); regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW); } static int ccdc_open(struct device *device) { ccdc_restore_defaults(); if (ccdc_cfg.if_type == VPFE_RAW_BAYER) ccdc_enable_vport(1); return 0; } static void ccdc_sbl_reset(void) { vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); } /* Parameter operations */ static int ccdc_set_params(void __user *params) { struct ccdc_config_params_raw ccdc_raw_params; int x; if (ccdc_cfg.if_type != VPFE_RAW_BAYER) return -EINVAL; x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); if (x) { dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying" "ccdc params, %d\n", x); return -EFAULT; } if (!validate_ccdc_param(&ccdc_raw_params)) { if (!ccdc_update_raw_params(&ccdc_raw_params)) return 0; } return -EINVAL; } /* * ccdc_config_ycbcr() * This function will configure CCDC for YCbCr video capture */ void ccdc_config_ycbcr(void) { struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr; u32 syn_mode; dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr..."); /* * first restore the CCDC registers to default values * This is important since we assume default values to be set in * a lot of registers that we didn't touch */ ccdc_restore_defaults(); /* * configure pixel format, frame format, configure video frame * format, 
enable output to SDRAM, enable internal timing generator * and 8bit pack mode */ syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) << CCDC_SYN_MODE_INPMOD_SHIFT) | ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) << CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE | CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE); /* setup BT.656 sync mode */ if (params->bt656_enable) { regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF); /* * configure the FID, VD, HD pin polarity, * fld,hd pol positive, vd negative, 8-bit data */ syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE; if (ccdc_cfg.if_type == VPFE_BT656_10BIT) syn_mode |= CCDC_SYN_MODE_10BITS; else syn_mode |= CCDC_SYN_MODE_8BITS; } else { /* y/c external sync mode */ syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) | ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) | ((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT)); } regw(syn_mode, CCDC_SYN_MODE); /* configure video window */ ccdc_setwin(&params->win, params->frm_fmt, 2); /* * configure the order of y cb cr in SDRAM, and disable latch * internal register on vsync */ if (ccdc_cfg.if_type == VPFE_BT656_10BIT) regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) | CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT, CCDC_CCDCFG); else regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) | CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG); /* * configure the horizontal line offset. This should be a * on 32 byte boundary. 
So clear LSB 5 bits */ regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF); /* configure the memory line offset */ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) /* two fields are interleaved in memory */ regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST); ccdc_sbl_reset(); dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n"); } static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp) { u32 val; if (!bclamp->enable) { /* configure DCSub */ val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK; regw(val, CCDC_DCSUB); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to DCSUB...\n", val); regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP); dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to CLAMP...\n"); return; } /* * Configure gain, Start pixel, No of line to be avg, * No of pixel/line to be avg, & Enable the Black clamping */ val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) | ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) << CCDC_BLK_ST_PXL_SHIFT) | ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) << CCDC_BLK_SAMPLE_LINE_SHIFT) | ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) << CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE); regw(val, CCDC_CLAMP); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to CLAMP...\n", val); /* If Black clamping is enable then make dcsub 0 */ regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB); dev_dbg(ccdc_cfg.dev, "\nWriting 0x00000000 to DCSUB...\n"); } static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp) { u32 val; val = ((bcomp->b & CCDC_BLK_COMP_MASK) | ((bcomp->gb & CCDC_BLK_COMP_MASK) << CCDC_BLK_COMP_GB_COMP_SHIFT) | ((bcomp->gr & CCDC_BLK_COMP_MASK) << CCDC_BLK_COMP_GR_COMP_SHIFT) | ((bcomp->r & CCDC_BLK_COMP_MASK) << CCDC_BLK_COMP_R_COMP_SHIFT)); regw(val, CCDC_BLKCMP); } static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc) { u32 val; /* Initially disable FPC */ val = CCDC_FPC_DISABLE; regw(val, CCDC_FPC); if (!fpc->enable) return; /* Configure Fault pixel if needed */ regw(fpc->fpc_table_addr, CCDC_FPC_ADDR); 
dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC_ADDR...\n", (fpc->fpc_table_addr)); /* Write the FPC params with FPC disable */ val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK; regw(val, CCDC_FPC); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); /* read the FPC register */ val = regr(CCDC_FPC) | CCDC_FPC_ENABLE; regw(val, CCDC_FPC); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); } /* * ccdc_config_raw() * This function will configure CCDC for Raw capture mode */ void ccdc_config_raw(void) { struct ccdc_params_raw *params = &ccdc_cfg.bayer; struct ccdc_config_params_raw *config_params = &ccdc_cfg.bayer.config_params; unsigned int syn_mode = 0; unsigned int val; dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw..."); /* Reset CCDC */ ccdc_restore_defaults(); /* Disable latching function registers on VSYNC */ regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG); /* * Configure the vertical sync polarity(SYN_MODE.VDPOL), * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity * (SYN_MODE.FLDPOL), frame format(progressive or interlace), * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output * SDRAM, enable internal timing generator */ syn_mode = (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) | ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) | ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) | ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) | ((config_params->data_sz & CCDC_DATA_SZ_MASK) << CCDC_DATA_SZ_SHIFT) | ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) | CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE); /* Enable and configure aLaw register if needed */ if (config_params->alaw.enable) { val = ((config_params->alaw.gama_wd & CCDC_ALAW_GAMA_WD_MASK) | CCDC_ALAW_ENABLE); regw(val, CCDC_ALAW); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to ALAW...\n", val); } /* Configure video window */ ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW); /* Configure Black Clamp */ 
ccdc_config_black_clamp(&config_params->blk_clamp); /* Configure Black level compensation */ ccdc_config_black_compense(&config_params->blk_comp); /* Configure Fault Pixel Correction */ ccdc_config_fpc(&config_params->fault_pxl); /* If data size is 8 bit then pack the data */ if ((config_params->data_sz == CCDC_DATA_8BITS) || config_params->alaw.enable) syn_mode |= CCDC_DATA_PACK_ENABLE; #ifdef CONFIG_DM644X_VIDEO_PORT_ENABLE /* enable video port */ val = CCDC_ENABLE_VIDEO_PORT; #else /* disable video port */ val = CCDC_DISABLE_VIDEO_PORT; #endif if (config_params->data_sz == CCDC_DATA_8BITS) val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK) << CCDC_FMTCFG_VPIN_SHIFT; else val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK) << CCDC_FMTCFG_VPIN_SHIFT; /* Write value in FMTCFG */ regw(val, CCDC_FMTCFG); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMTCFG...\n", val); /* Configure the color pattern according to mt9t001 sensor */ regw(CCDC_COLPTN_VAL, CCDC_COLPTN); dev_dbg(ccdc_cfg.dev, "\nWriting 0xBB11BB11 to COLPTN...\n"); /* * Configure Data formatter(Video port) pixel selection * (FMT_HORZ, FMT_VERT) */ val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) << CCDC_FMT_HORZ_FMTSPH_SHIFT) | (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK); regw(val, CCDC_FMT_HORZ); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_HORZ...\n", val); val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK) << CCDC_FMT_VERT_FMTSLV_SHIFT; if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK; else val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK; dev_dbg(ccdc_cfg.dev, "\nparams->win.height 0x%x ...\n", params->win.height); regw(val, CCDC_FMT_VERT); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_VERT...\n", val); dev_dbg(ccdc_cfg.dev, "\nbelow regw(val, FMT_VERT)..."); /* * Configure Horizontal offset register. 
If pack 8 is enabled then * 1 pixel will take 1 byte */ if ((config_params->data_sz == CCDC_DATA_8BITS) || config_params->alaw.enable) regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF); else /* else one pixel will take 2 byte */ regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) + CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF); /* Set value for SDOFST */ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) { if (params->image_invert_enable) { /* For intelace inverse mode */ regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST); dev_dbg(ccdc_cfg.dev, "\nWriting 0x4B6D to SDOFST..\n"); } else { /* For intelace non inverse mode */ regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST); dev_dbg(ccdc_cfg.dev, "\nWriting 0x0249 to SDOFST..\n"); } } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST); dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to SDOFST...\n"); } /* * Configure video port pixel selection (VPOUT) * Here -1 is to make the height value less than FMT_VERT.FMTLNV */ if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) val = (((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK)) << CCDC_VP_OUT_VERT_NUM_SHIFT; else val = ((((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) - 1) & CCDC_VP_OUT_VERT_NUM_MASK)) << CCDC_VP_OUT_VERT_NUM_SHIFT; val |= ((((params->win.width))) & CCDC_VP_OUT_HORZ_NUM_MASK) << CCDC_VP_OUT_HORZ_NUM_SHIFT; val |= (params->win.left) & CCDC_VP_OUT_HORZ_ST_MASK; regw(val, CCDC_VP_OUT); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to VP_OUT...\n", val); regw(syn_mode, CCDC_SYN_MODE); dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode); ccdc_sbl_reset(); dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw..."); ccdc_readregs(); } static int ccdc_configure(void) { if (ccdc_cfg.if_type == VPFE_RAW_BAYER) ccdc_config_raw(); else ccdc_config_ycbcr(); return 0; } static int ccdc_set_buftype(enum ccdc_buftype buf_type) { if (ccdc_cfg.if_type == 
VPFE_RAW_BAYER) ccdc_cfg.bayer.buf_type = buf_type; else ccdc_cfg.ycbcr.buf_type = buf_type; return 0; } static enum ccdc_buftype ccdc_get_buftype(void) { if (ccdc_cfg.if_type == VPFE_RAW_BAYER) return ccdc_cfg.bayer.buf_type; return ccdc_cfg.ycbcr.buf_type; } static int ccdc_enum_pix(u32 *pix, int i) { int ret = -EINVAL; if (ccdc_cfg.if_type == VPFE_RAW_BAYER) { if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) { *pix = ccdc_raw_bayer_pix_formats[i]; ret = 0; } } else { if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) { *pix = ccdc_raw_yuv_pix_formats[i]; ret = 0; } } return ret; } static int ccdc_set_pixel_format(u32 pixfmt) { if (ccdc_cfg.if_type == VPFE_RAW_BAYER) { ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW; if (pixfmt == V4L2_PIX_FMT_SBGGR8) ccdc_cfg.bayer.config_params.alaw.enable = 1; else if (pixfmt != V4L2_PIX_FMT_SBGGR16) return -EINVAL; } else { if (pixfmt == V4L2_PIX_FMT_YUYV) ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR; else if (pixfmt == V4L2_PIX_FMT_UYVY) ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; else return -EINVAL; } return 0; } static u32 ccdc_get_pixel_format(void) { struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw; u32 pixfmt; if (ccdc_cfg.if_type == VPFE_RAW_BAYER) if (alaw->enable) pixfmt = V4L2_PIX_FMT_SBGGR8; else pixfmt = V4L2_PIX_FMT_SBGGR16; else { if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR) pixfmt = V4L2_PIX_FMT_YUYV; else pixfmt = V4L2_PIX_FMT_UYVY; } return pixfmt; } static int ccdc_set_image_window(struct v4l2_rect *win) { if (ccdc_cfg.if_type == VPFE_RAW_BAYER) ccdc_cfg.bayer.win = *win; else ccdc_cfg.ycbcr.win = *win; return 0; } static void ccdc_get_image_window(struct v4l2_rect *win) { if (ccdc_cfg.if_type == VPFE_RAW_BAYER) *win = ccdc_cfg.bayer.win; else *win = ccdc_cfg.ycbcr.win; } static unsigned int ccdc_get_line_length(void) { struct ccdc_config_params_raw *config_params = &ccdc_cfg.bayer.config_params; unsigned int len; if (ccdc_cfg.if_type == VPFE_RAW_BAYER) { if ((config_params->alaw.enable) || 
(config_params->data_sz == CCDC_DATA_8BITS)) len = ccdc_cfg.bayer.win.width; else len = ccdc_cfg.bayer.win.width * 2; } else len = ccdc_cfg.ycbcr.win.width * 2; return ALIGN(len, 32); } static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt) { if (ccdc_cfg.if_type == VPFE_RAW_BAYER) ccdc_cfg.bayer.frm_fmt = frm_fmt; else ccdc_cfg.ycbcr.frm_fmt = frm_fmt; return 0; } static enum ccdc_frmfmt ccdc_get_frame_format(void) { if (ccdc_cfg.if_type == VPFE_RAW_BAYER) return ccdc_cfg.bayer.frm_fmt; else return ccdc_cfg.ycbcr.frm_fmt; } static int ccdc_getfid(void) { return (regr(CCDC_SYN_MODE) >> 15) & 1; } /* misc operations */ static inline void ccdc_setfbaddr(unsigned long addr) { regw(addr & 0xffffffe0, CCDC_SDR_ADDR); } static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params) { ccdc_cfg.if_type = params->if_type; switch (params->if_type) { case VPFE_BT656: case VPFE_YCBCR_SYNC_16: case VPFE_YCBCR_SYNC_8: case VPFE_BT656_10BIT: ccdc_cfg.ycbcr.vd_pol = params->vdpol; ccdc_cfg.ycbcr.hd_pol = params->hdpol; break; default: /* TODO add support for raw bayer here */ return -EINVAL; } return 0; } static void ccdc_save_context(void) { ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR); ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE); ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID); ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES); ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO); ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START); ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES); ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING); ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF); ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST); ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR); ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP); ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB); ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN); ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP); ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC); ccdc_ctx[CCDC_FPC_ADDR >> 2] = 
regr(CCDC_FPC_ADDR); ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT); ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW); ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF); ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG); ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG); ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ); ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT); ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0); ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1); ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2); ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3); ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4); ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5); ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6); ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7); ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0); ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1); ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0); ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1); ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT); } static void ccdc_restore_context(void) { regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE); regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID); regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES); regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO); regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START); regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES); regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING); regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF); regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST); regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR); regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP); regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB); regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN); regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP); regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC); regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR); regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT); regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW); 
regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF); regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG); regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG); regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ); regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT); regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0); regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1); regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2); regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3); regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4); regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5); regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6); regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7); regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0); regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1); regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0); regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1); regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT); regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR); } static struct ccdc_hw_device ccdc_hw_dev = { .name = "DM6446 CCDC", .owner = THIS_MODULE, .hw_ops = { .open = ccdc_open, .close = ccdc_close, .reset = ccdc_sbl_reset, .enable = ccdc_enable, .set_hw_if_params = ccdc_set_hw_if_params, .set_params = ccdc_set_params, .configure = ccdc_configure, .set_buftype = ccdc_set_buftype, .get_buftype = ccdc_get_buftype, .enum_pix = ccdc_enum_pix, .set_pixel_format = ccdc_set_pixel_format, .get_pixel_format = ccdc_get_pixel_format, .set_frame_format = ccdc_set_frame_format, .get_frame_format = ccdc_get_frame_format, .set_image_window = ccdc_set_image_window, .get_image_window = ccdc_get_image_window, .get_line_length = ccdc_get_line_length, .setfbaddr = ccdc_setfbaddr, .getfid = ccdc_getfid, }, }; static int __init dm644x_ccdc_probe(struct platform_device *pdev) { struct resource *res; int status = 0; /* * first try to register with vpfe. 
If not correct platform, then we * don't have to iomap */ status = vpfe_register_ccdc_device(&ccdc_hw_dev); if (status < 0) return status; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { status = -ENODEV; goto fail_nores; } res = request_mem_region(res->start, resource_size(res), res->name); if (!res) { status = -EBUSY; goto fail_nores; } ccdc_cfg.base_addr = ioremap_nocache(res->start, resource_size(res)); if (!ccdc_cfg.base_addr) { status = -ENOMEM; goto fail_nomem; } /* Get and enable Master clock */ ccdc_cfg.mclk = clk_get(&pdev->dev, "master"); if (IS_ERR(ccdc_cfg.mclk)) { status = PTR_ERR(ccdc_cfg.mclk); goto fail_nomap; } if (clk_enable(ccdc_cfg.mclk)) { status = -ENODEV; goto fail_mclk; } /* Get and enable Slave clock */ ccdc_cfg.sclk = clk_get(&pdev->dev, "slave"); if (IS_ERR(ccdc_cfg.sclk)) { status = PTR_ERR(ccdc_cfg.sclk); goto fail_mclk; } if (clk_enable(ccdc_cfg.sclk)) { status = -ENODEV; goto fail_sclk; } ccdc_cfg.dev = &pdev->dev; printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name); return 0; fail_sclk: clk_put(ccdc_cfg.sclk); fail_mclk: clk_put(ccdc_cfg.mclk); fail_nomap: iounmap(ccdc_cfg.base_addr); fail_nomem: release_mem_region(res->start, resource_size(res)); fail_nores: vpfe_unregister_ccdc_device(&ccdc_hw_dev); return status; } static int dm644x_ccdc_remove(struct platform_device *pdev) { struct resource *res; clk_put(ccdc_cfg.mclk); clk_put(ccdc_cfg.sclk); iounmap(ccdc_cfg.base_addr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) release_mem_region(res->start, resource_size(res)); vpfe_unregister_ccdc_device(&ccdc_hw_dev); return 0; } static int dm644x_ccdc_suspend(struct device *dev) { /* Save CCDC context */ ccdc_save_context(); /* Disable CCDC */ ccdc_enable(0); /* Disable both master and slave clock */ clk_disable(ccdc_cfg.mclk); clk_disable(ccdc_cfg.sclk); return 0; } static int dm644x_ccdc_resume(struct device *dev) { /* Enable both master and slave clock */ 
clk_enable(ccdc_cfg.mclk); clk_enable(ccdc_cfg.sclk); /* Restore CCDC context */ ccdc_restore_context(); return 0; } static const struct dev_pm_ops dm644x_ccdc_pm_ops = { .suspend = dm644x_ccdc_suspend, .resume = dm644x_ccdc_resume, }; static struct platform_driver dm644x_ccdc_driver = { .driver = { .name = "dm644x_ccdc", .owner = THIS_MODULE, .pm = &dm644x_ccdc_pm_ops, }, .remove = __devexit_p(dm644x_ccdc_remove), .probe = dm644x_ccdc_probe, }; module_platform_driver(dm644x_ccdc_driver);
gpl-2.0
AudioGod/Gods-Kernel-Huawei-Angler
lib/find_last_bit.c
8104
1139
/* find_last_bit.c: fallback find next bit implementation
 *
 * Copyright (C) 2008 IBM Corporation
 * Written by Rusty Russell <rusty@rustcorp.com.au>
 * (Inspired by David Howell's find_next_bit implementation)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <asm/types.h>
#include <asm/byteorder.h>

#ifndef find_last_bit
/*
 * find_last_bit - find the index of the highest set bit in a bitmap
 * @addr: base of the bitmap
 * @size: number of valid bits in the bitmap
 *
 * Returns the bit number of the last (highest-index) set bit,
 * or @size if no bits are set.
 */
unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
{
	unsigned long idx = size / BITS_PER_LONG;
	unsigned long rem = size % BITS_PER_LONG;

	/*
	 * The final word may be only partially valid: mask off the
	 * bits at and above @size before testing it.
	 */
	if (rem) {
		unsigned long val = addr[idx] & (~0UL >> (BITS_PER_LONG - rem));

		if (val)
			return idx * BITS_PER_LONG + __fls(val);
	}

	/* Scan the remaining full words from high index down to zero. */
	while (idx--) {
		if (addr[idx])
			return idx * BITS_PER_LONG + __fls(addr[idx]);
	}

	/* No set bit anywhere in the bitmap. */
	return size;
}
EXPORT_SYMBOL(find_last_bit);
#endif
gpl-2.0
KOala888/Reborn
arch/arm/plat-spear/padmux.c
8104
3900
/*
 * arch/arm/plat-spear/include/plat/padmux.c
 *
 * SPEAr platform specific gpio pads muxing source file
 *
 * Copyright (C) 2009 ST Microelectronics
 * Viresh Kumar<viresh.kumar@st.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <plat/padmux.h>

/*
 * struct pmx: pmx definition structure
 *
 * base: base address of configuration registers
 * mode_reg: mode configurations
 * mux_reg: muxing configurations
 * active_mode: pointer to current active mode
 */
struct pmx {
	u32 base;
	struct pmx_reg mode_reg;
	struct pmx_reg mux_reg;
	struct pmx_mode *active_mode;
};

/*
 * Singleton multiplexing state. NULL until pmx_register() succeeds;
 * deliberately poisoned to -1 once devices are enabled so muxing can
 * never be changed again (see pmx_devs_enable()).
 */
static struct pmx *pmx;

/**
 * pmx_mode_set - Enables a multiplexing mode
 * @mode - pointer to pmx mode
 *
 * It will set the mode of operation in hardware by read-modify-write
 * of the mode register bits covered by pmx->mode_reg.mask.
 * Returns -ve on Err otherwise 0
 */
static int pmx_mode_set(struct pmx_mode *mode)
{
	u32 val;

	if (!mode->name)
		return -EFAULT;

	pmx->active_mode = mode;

	val = readl(pmx->base + pmx->mode_reg.offset);
	val &= ~pmx->mode_reg.mask;
	val |= mode->mask & pmx->mode_reg.mask;
	writel(val, pmx->base + pmx->mode_reg.offset);

	return 0;
}

/**
 * pmx_devs_enable - Enables list of devices
 * @devs - pointer to pmx device array
 * @count - number of devices to enable
 *
 * It will enable pads for all required peripherals once and only once.
 * If peripheral is not supported by current mode then request is rejected.
 * Conflicts between peripherals are not handled and peripherals will be
 * enabled in the order they are present in pmx_dev array.
 * In case of conflicts last peripheral enabled will be present.
 * Returns -ve on Err otherwise 0
 */
static int pmx_devs_enable(struct pmx_dev **devs, u8 count)
{
	u32 val, i, mask;

	if (!count)
		return -EINVAL;

	val = readl(pmx->base + pmx->mux_reg.offset);
	for (i = 0; i < count; i++) {
		u8 j = 0;

		if (!devs[i]->name || !devs[i]->modes) {
			printk(KERN_ERR "padmux: dev name or modes is null\n");
			continue;
		}
		/* check if peripheral exists in active mode */
		if (pmx->active_mode) {
			bool found = false;

			for (j = 0; j < devs[i]->mode_count; j++) {
				if (devs[i]->modes[j].ids &
						pmx->active_mode->id) {
					found = true;
					break;
				}
			}
			if (!found) {
				/*
				 * Fixed: the original string concatenation
				 * produced "... in %smode" (no space).
				 */
				printk(KERN_ERR "%s device not available in "
						"%s mode\n", devs[i]->name,
						pmx->active_mode->name);
				continue;
			}
		}

		/* enable peripheral */
		mask = devs[i]->modes[j].mask & pmx->mux_reg.mask;
		if (devs[i]->enb_on_reset)
			val &= ~mask;
		else
			val |= mask;

		devs[i]->is_active = true;
	}
	writel(val, pmx->base + pmx->mux_reg.offset);
	kfree(pmx);

	/* this will ensure that multiplexing can't be changed now */
	pmx = (struct pmx *)-1;

	return 0;
}

/**
 * pmx_register - registers a platform requesting pad mux feature
 * @driver - pointer to driver structure containing driver specific parameters
 *
 * Also this must be called only once. This will allocate memory for pmx
 * structure, will call pmx_mode_set, will call pmx_devs_enable.
 * On failure the allocation is released and the global state is reset,
 * so a caller may fix its parameters and retry (the original code
 * leaked the allocation and rejected all retries with -EPERM).
 * Returns -ve on Err otherwise 0
 */
int pmx_register(struct pmx_driver *driver)
{
	int ret = 0;

	if (pmx)
		return -EPERM;
	if (!driver->base || !driver->devs)
		return -EFAULT;

	pmx = kzalloc(sizeof(*pmx), GFP_KERNEL);
	if (!pmx)
		return -ENOMEM;

	pmx->base = (u32)driver->base;
	pmx->mode_reg.offset = driver->mode_reg.offset;
	pmx->mode_reg.mask = driver->mode_reg.mask;
	pmx->mux_reg.offset = driver->mux_reg.offset;
	pmx->mux_reg.mask = driver->mux_reg.mask;

	/* choose mode to enable */
	if (driver->mode) {
		ret = pmx_mode_set(driver->mode);
		if (ret)
			goto pmx_fail;
	}

	ret = pmx_devs_enable(driver->devs, driver->devs_count);
	if (ret)
		goto pmx_fail;

	return 0;

pmx_fail:
	/*
	 * pmx_devs_enable() only fails before freeing pmx, so the
	 * allocation is still ours here. Release it and allow a retry.
	 */
	kfree(pmx);
	pmx = NULL;
	return ret;
}
gpl-2.0
CyanogenMod/android_kernel_htc_msm8660
sound/pci/echoaudio/echoaudio_dsp.c
8872
30597
/**************************************************************************** Copyright Echo Digital Audio Corporation (c) 1998 - 2004 All rights reserved www.echoaudio.com This file is part of Echo Digital Audio's generic driver library. Echo Digital Audio's generic driver library is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. ************************************************************************* Translation from C++ and adaptation for use in ALSA-Driver were made by Giuliano Pochini <pochini@shiny.it> ****************************************************************************/ #if PAGE_SIZE < 4096 #error PAGE_SIZE is < 4k #endif static int restore_dsp_rettings(struct echoaudio *chip); /* Some vector commands involve the DSP reading or writing data to and from the comm page; if you send one of these commands to the DSP, it will complete the command and then write a non-zero value to the Handshake field in the comm page. This function waits for the handshake to show up. 
 */
static int wait_handshake(struct echoaudio *chip)
{
	int i;

	/* Wait up to 20ms for the handshake from the DSP */
	for (i = 0; i < HANDSHAKE_TIMEOUT; i++) {
		/* Look for the handshake value */
		barrier();	/* force a fresh read of the comm page each pass */
		if (chip->comm_page->handshake) {
			return 0;
		}
		udelay(1);
	}

	snd_printk(KERN_ERR "wait_handshake(): Timeout waiting for DSP\n");
	return -EBUSY;
}

/* Much of the interaction between the DSP and the driver is done via vector
   commands; send_vector writes a vector command to the DSP. Typically, this
   causes the DSP to read or write fields in the comm page.
   PCI posting is not required thanks to the handshake logic. */
static int send_vector(struct echoaudio *chip, u32 command)
{
	int i;

	wmb();	/* Flush all pending writes before sending the command */

	/* Wait up to 100ms for the "vector busy" bit to be off */
	for (i = 0; i < VECTOR_BUSY_TIMEOUT; i++) {
		if (!(get_dsp_register(chip, CHI32_VECTOR_REG)
		      & CHI32_VECTOR_BUSY)) {
			set_dsp_register(chip, CHI32_VECTOR_REG, command);
			/*if (i) DE_ACT(("send_vector time: %d\n", i));*/
			return 0;
		}
		udelay(1);
	}

	DE_ACT((KERN_ERR "timeout on send_vector\n"));
	return -EBUSY;
}

/* write_dsp writes a 32-bit value to the DSP; this is used almost
   exclusively for loading the DSP. */
static int write_dsp(struct echoaudio *chip, u32 data)
{
	u32 status, i;

	for (i = 0; i < 10000000; i++) {	/* timeout = 10s */
		status = get_dsp_register(chip, CHI32_STATUS_REG);
		if ((status & CHI32_STATUS_HOST_WRITE_EMPTY) != 0) {
			set_dsp_register(chip, CHI32_DATA_REG, data);
			wmb();	/* write it immediately */
			return 0;
		}
		udelay(1);
		cond_resched();	/* 10s spin; yield so we don't hog the CPU */
	}

	chip->bad_board = TRUE;		/* Set TRUE until DSP re-loaded */
	DE_ACT((KERN_ERR "write_dsp: Set bad_board to TRUE\n"));
	return -EIO;
}

/* read_dsp reads a 32-bit value from the DSP; this is used almost
   exclusively for loading the DSP and checking the status of the ASIC.
*/ static int read_dsp(struct echoaudio *chip, u32 *data) { u32 status, i; for (i = 0; i < READ_DSP_TIMEOUT; i++) { status = get_dsp_register(chip, CHI32_STATUS_REG); if ((status & CHI32_STATUS_HOST_READ_FULL) != 0) { *data = get_dsp_register(chip, CHI32_DATA_REG); return 0; } udelay(1); cond_resched(); } chip->bad_board = TRUE; /* Set TRUE until DSP re-loaded */ DE_INIT((KERN_ERR "read_dsp: Set bad_board to TRUE\n")); return -EIO; } /**************************************************************************** Firmware loading functions ****************************************************************************/ /* This function is used to read back the serial number from the DSP; this is triggered by the SET_COMMPAGE_ADDR command. Only some early Echogals products have serial numbers in the ROM; the serial number is not used, but you still need to do this as part of the DSP load process. */ static int read_sn(struct echoaudio *chip) { int i; u32 sn[6]; for (i = 0; i < 5; i++) { if (read_dsp(chip, &sn[i])) { snd_printk(KERN_ERR "Failed to read serial number\n"); return -EIO; } } DE_INIT(("Read serial number %08x %08x %08x %08x %08x\n", sn[0], sn[1], sn[2], sn[3], sn[4])); return 0; } #ifndef ECHOCARD_HAS_ASIC /* This card has no ASIC, just return ok */ static inline int check_asic_status(struct echoaudio *chip) { chip->asic_loaded = TRUE; return 0; } #endif /* !ECHOCARD_HAS_ASIC */ #ifdef ECHOCARD_HAS_ASIC /* Load ASIC code - done after the DSP is loaded */ static int load_asic_generic(struct echoaudio *chip, u32 cmd, short asic) { const struct firmware *fw; int err; u32 i, size; u8 *code; err = get_firmware(&fw, chip, asic); if (err < 0) { snd_printk(KERN_WARNING "Firmware not found !\n"); return err; } code = (u8 *)fw->data; size = fw->size; /* Send the "Here comes the ASIC" command */ if (write_dsp(chip, cmd) < 0) goto la_error; /* Write length of ASIC file in bytes */ if (write_dsp(chip, size) < 0) goto la_error; for (i = 0; i < size; i++) { if 
(write_dsp(chip, code[i]) < 0) goto la_error; } DE_INIT(("ASIC loaded\n")); free_firmware(fw); return 0; la_error: DE_INIT(("failed on write_dsp\n")); free_firmware(fw); return -EIO; } #endif /* ECHOCARD_HAS_ASIC */ #ifdef DSP_56361 /* Install the resident loader for 56361 DSPs; The resident loader is on the EPROM on the board for 56301 DSP. The resident loader is a tiny little program that is used to load the real DSP code. */ static int install_resident_loader(struct echoaudio *chip) { u32 address; int index, words, i; u16 *code; u32 status; const struct firmware *fw; /* 56361 cards only! This check is required by the old 56301-based Mona and Gina24 */ if (chip->device_id != DEVICE_ID_56361) return 0; /* Look to see if the resident loader is present. If the resident loader is already installed, host flag 5 will be on. */ status = get_dsp_register(chip, CHI32_STATUS_REG); if (status & CHI32_STATUS_REG_HF5) { DE_INIT(("Resident loader already installed; status is 0x%x\n", status)); return 0; } i = get_firmware(&fw, chip, FW_361_LOADER); if (i < 0) { snd_printk(KERN_WARNING "Firmware not found !\n"); return i; } /* The DSP code is an array of 16 bit words. The array is divided up into sections. The first word of each section is the size in words, followed by the section type. Since DSP addresses and data are 24 bits wide, they each take up two 16 bit words in the array. This is a lot like the other loader loop, but it's not a loop, you don't write the memory type, and you don't write a zero at the end. */ /* Set DSP format bits for 24 bit mode */ set_dsp_register(chip, CHI32_CONTROL_REG, get_dsp_register(chip, CHI32_CONTROL_REG) | 0x900); code = (u16 *)fw->data; /* Skip the header section; the first word in the array is the size of the first section, so the first real section of code is pointed to by Code[0]. 
*/ index = code[0]; /* Skip the section size, LRS block type, and DSP memory type */ index += 3; /* Get the number of DSP words to write */ words = code[index++]; /* Get the DSP address for this block; 24 bits, so build from two words */ address = ((u32)code[index] << 16) + code[index + 1]; index += 2; /* Write the count to the DSP */ if (write_dsp(chip, words)) { DE_INIT(("install_resident_loader: Failed to write word count!\n")); goto irl_error; } /* Write the DSP address */ if (write_dsp(chip, address)) { DE_INIT(("install_resident_loader: Failed to write DSP address!\n")); goto irl_error; } /* Write out this block of code to the DSP */ for (i = 0; i < words; i++) { u32 data; data = ((u32)code[index] << 16) + code[index + 1]; if (write_dsp(chip, data)) { DE_INIT(("install_resident_loader: Failed to write DSP code\n")); goto irl_error; } index += 2; } /* Wait for flag 5 to come up */ for (i = 0; i < 200; i++) { /* Timeout is 50us * 200 = 10ms */ udelay(50); status = get_dsp_register(chip, CHI32_STATUS_REG); if (status & CHI32_STATUS_REG_HF5) break; } if (i == 200) { DE_INIT(("Resident loader failed to set HF5\n")); goto irl_error; } DE_INIT(("Resident loader successfully installed\n")); free_firmware(fw); return 0; irl_error: free_firmware(fw); return -EIO; } #endif /* DSP_56361 */ static int load_dsp(struct echoaudio *chip, u16 *code) { u32 address, data; int index, words, i; if (chip->dsp_code == code) { DE_INIT(("DSP is already loaded!\n")); return 0; } chip->bad_board = TRUE; /* Set TRUE until DSP loaded */ chip->dsp_code = NULL; /* Current DSP code not loaded */ chip->asic_loaded = FALSE; /* Loading the DSP code will reset the ASIC */ DE_INIT(("load_dsp: Set bad_board to TRUE\n")); /* If this board requires a resident loader, install it. 
*/ #ifdef DSP_56361 if ((i = install_resident_loader(chip)) < 0) return i; #endif /* Send software reset command */ if (send_vector(chip, DSP_VC_RESET) < 0) { DE_INIT(("LoadDsp: send_vector DSP_VC_RESET failed, Critical Failure\n")); return -EIO; } /* Delay 10us */ udelay(10); /* Wait 10ms for HF3 to indicate that software reset is complete */ for (i = 0; i < 1000; i++) { /* Timeout is 10us * 1000 = 10ms */ if (get_dsp_register(chip, CHI32_STATUS_REG) & CHI32_STATUS_REG_HF3) break; udelay(10); } if (i == 1000) { DE_INIT(("load_dsp: Timeout waiting for CHI32_STATUS_REG_HF3\n")); return -EIO; } /* Set DSP format bits for 24 bit mode now that soft reset is done */ set_dsp_register(chip, CHI32_CONTROL_REG, get_dsp_register(chip, CHI32_CONTROL_REG) | 0x900); /* Main loader loop */ index = code[0]; for (;;) { int block_type, mem_type; /* Total Block Size */ index++; /* Block Type */ block_type = code[index]; if (block_type == 4) /* We're finished */ break; index++; /* Memory Type P=0,X=1,Y=2 */ mem_type = code[index++]; /* Block Code Size */ words = code[index++]; if (words == 0) /* We're finished */ break; /* Start Address */ address = ((u32)code[index] << 16) + code[index + 1]; index += 2; if (write_dsp(chip, words) < 0) { DE_INIT(("load_dsp: failed to write number of DSP words\n")); return -EIO; } if (write_dsp(chip, address) < 0) { DE_INIT(("load_dsp: failed to write DSP address\n")); return -EIO; } if (write_dsp(chip, mem_type) < 0) { DE_INIT(("load_dsp: failed to write DSP memory type\n")); return -EIO; } /* Code */ for (i = 0; i < words; i++, index+=2) { data = ((u32)code[index] << 16) + code[index + 1]; if (write_dsp(chip, data) < 0) { DE_INIT(("load_dsp: failed to write DSP data\n")); return -EIO; } } } if (write_dsp(chip, 0) < 0) { /* We're done!!! 
*/ DE_INIT(("load_dsp: Failed to write final zero\n")); return -EIO; } udelay(10); for (i = 0; i < 5000; i++) { /* Timeout is 100us * 5000 = 500ms */ /* Wait for flag 4 - indicates that the DSP loaded OK */ if (get_dsp_register(chip, CHI32_STATUS_REG) & CHI32_STATUS_REG_HF4) { set_dsp_register(chip, CHI32_CONTROL_REG, get_dsp_register(chip, CHI32_CONTROL_REG) & ~0x1b00); if (write_dsp(chip, DSP_FNC_SET_COMMPAGE_ADDR) < 0) { DE_INIT(("load_dsp: Failed to write DSP_FNC_SET_COMMPAGE_ADDR\n")); return -EIO; } if (write_dsp(chip, chip->comm_page_phys) < 0) { DE_INIT(("load_dsp: Failed to write comm page address\n")); return -EIO; } /* Get the serial number via slave mode. This is triggered by the SET_COMMPAGE_ADDR command. We don't actually use the serial number but we have to get it as part of the DSP init voodoo. */ if (read_sn(chip) < 0) { DE_INIT(("load_dsp: Failed to read serial number\n")); return -EIO; } chip->dsp_code = code; /* Show which DSP code loaded */ chip->bad_board = FALSE; /* DSP OK */ DE_INIT(("load_dsp: OK!\n")); return 0; } udelay(100); } DE_INIT(("load_dsp: DSP load timed out waiting for HF4\n")); return -EIO; } /* load_firmware takes care of loading the DSP and any ASIC code. 
*/ static int load_firmware(struct echoaudio *chip) { const struct firmware *fw; int box_type, err; if (snd_BUG_ON(!chip->comm_page)) return -EPERM; /* See if the ASIC is present and working - only if the DSP is already loaded */ if (chip->dsp_code) { if ((box_type = check_asic_status(chip)) >= 0) return box_type; /* ASIC check failed; force the DSP to reload */ chip->dsp_code = NULL; } err = get_firmware(&fw, chip, chip->dsp_code_to_load); if (err < 0) return err; err = load_dsp(chip, (u16 *)fw->data); free_firmware(fw); if (err < 0) return err; if ((box_type = load_asic(chip)) < 0) return box_type; /* error */ return box_type; } /**************************************************************************** Mixer functions ****************************************************************************/ #if defined(ECHOCARD_HAS_INPUT_NOMINAL_LEVEL) || \ defined(ECHOCARD_HAS_OUTPUT_NOMINAL_LEVEL) /* Set the nominal level for an input or output bus (true = -10dBV, false = +4dBu) */ static int set_nominal_level(struct echoaudio *chip, u16 index, char consumer) { if (snd_BUG_ON(index >= num_busses_out(chip) + num_busses_in(chip))) return -EINVAL; /* Wait for the handshake (OK even if ASIC is not loaded) */ if (wait_handshake(chip)) return -EIO; chip->nominal_level[index] = consumer; if (consumer) chip->comm_page->nominal_level_mask |= cpu_to_le32(1 << index); else chip->comm_page->nominal_level_mask &= ~cpu_to_le32(1 << index); return 0; } #endif /* ECHOCARD_HAS_*_NOMINAL_LEVEL */ /* Set the gain for a single physical output channel (dB). */ static int set_output_gain(struct echoaudio *chip, u16 channel, s8 gain) { if (snd_BUG_ON(channel >= num_busses_out(chip))) return -EINVAL; if (wait_handshake(chip)) return -EIO; /* Save the new value */ chip->output_gain[channel] = gain; chip->comm_page->line_out_level[channel] = gain; return 0; } #ifdef ECHOCARD_HAS_MONITOR /* Set the monitor level from an input bus to an output bus. 
*/ static int set_monitor_gain(struct echoaudio *chip, u16 output, u16 input, s8 gain) { if (snd_BUG_ON(output >= num_busses_out(chip) || input >= num_busses_in(chip))) return -EINVAL; if (wait_handshake(chip)) return -EIO; chip->monitor_gain[output][input] = gain; chip->comm_page->monitors[monitor_index(chip, output, input)] = gain; return 0; } #endif /* ECHOCARD_HAS_MONITOR */ /* Tell the DSP to read and update output, nominal & monitor levels in comm page. */ static int update_output_line_level(struct echoaudio *chip) { if (wait_handshake(chip)) return -EIO; clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_OUTVOL); } /* Tell the DSP to read and update input levels in comm page */ static int update_input_line_level(struct echoaudio *chip) { if (wait_handshake(chip)) return -EIO; clear_handshake(chip); return send_vector(chip, DSP_VC_UPDATE_INGAIN); } /* set_meters_on turns the meters on or off. If meters are turned on, the DSP will write the meter and clock detect values to the comm page at about 30Hz */ static void set_meters_on(struct echoaudio *chip, char on) { if (on && !chip->meters_enabled) { send_vector(chip, DSP_VC_METERS_ON); chip->meters_enabled = 1; } else if (!on && chip->meters_enabled) { send_vector(chip, DSP_VC_METERS_OFF); chip->meters_enabled = 0; memset((s8 *)chip->comm_page->vu_meter, ECHOGAIN_MUTED, DSP_MAXPIPES); memset((s8 *)chip->comm_page->peak_meter, ECHOGAIN_MUTED, DSP_MAXPIPES); } } /* Fill out an the given array using the current values in the comm page. Meters are written in the comm page by the DSP in this order: Output busses Input busses Output pipes (vmixer cards only) This function assumes there are no more than 16 in/out busses or pipes Meters is an array [3][16][2] of long. 
*/ static void get_audio_meters(struct echoaudio *chip, long *meters) { int i, m, n; m = 0; n = 0; for (i = 0; i < num_busses_out(chip); i++, m++) { meters[n++] = chip->comm_page->vu_meter[m]; meters[n++] = chip->comm_page->peak_meter[m]; } for (; n < 32; n++) meters[n] = 0; #ifdef ECHOCARD_ECHO3G m = E3G_MAX_OUTPUTS; /* Skip unused meters */ #endif for (i = 0; i < num_busses_in(chip); i++, m++) { meters[n++] = chip->comm_page->vu_meter[m]; meters[n++] = chip->comm_page->peak_meter[m]; } for (; n < 64; n++) meters[n] = 0; #ifdef ECHOCARD_HAS_VMIXER for (i = 0; i < num_pipes_out(chip); i++, m++) { meters[n++] = chip->comm_page->vu_meter[m]; meters[n++] = chip->comm_page->peak_meter[m]; } #endif for (; n < 96; n++) meters[n] = 0; } static int restore_dsp_rettings(struct echoaudio *chip) { int i, o, err; DE_INIT(("restore_dsp_settings\n")); if ((err = check_asic_status(chip)) < 0) return err; /* Gina20/Darla20 only. Should be harmless for other cards. */ chip->comm_page->gd_clock_state = GD_CLOCK_UNDEF; chip->comm_page->gd_spdif_status = GD_SPDIF_STATUS_UNDEF; chip->comm_page->handshake = 0xffffffff; /* Restore output busses */ for (i = 0; i < num_busses_out(chip); i++) { err = set_output_gain(chip, i, chip->output_gain[i]); if (err < 0) return err; } #ifdef ECHOCARD_HAS_VMIXER for (i = 0; i < num_pipes_out(chip); i++) for (o = 0; o < num_busses_out(chip); o++) { err = set_vmixer_gain(chip, o, i, chip->vmixer_gain[o][i]); if (err < 0) return err; } if (update_vmixer_level(chip) < 0) return -EIO; #endif /* ECHOCARD_HAS_VMIXER */ #ifdef ECHOCARD_HAS_MONITOR for (o = 0; o < num_busses_out(chip); o++) for (i = 0; i < num_busses_in(chip); i++) { err = set_monitor_gain(chip, o, i, chip->monitor_gain[o][i]); if (err < 0) return err; } #endif /* ECHOCARD_HAS_MONITOR */ #ifdef ECHOCARD_HAS_INPUT_GAIN for (i = 0; i < num_busses_in(chip); i++) { err = set_input_gain(chip, i, chip->input_gain[i]); if (err < 0) return err; } #endif /* ECHOCARD_HAS_INPUT_GAIN */ err = 
update_output_line_level(chip); if (err < 0) return err; err = update_input_line_level(chip); if (err < 0) return err; err = set_sample_rate(chip, chip->sample_rate); if (err < 0) return err; if (chip->meters_enabled) { err = send_vector(chip, DSP_VC_METERS_ON); if (err < 0) return err; } #ifdef ECHOCARD_HAS_DIGITAL_MODE_SWITCH if (set_digital_mode(chip, chip->digital_mode) < 0) return -EIO; #endif #ifdef ECHOCARD_HAS_DIGITAL_IO if (set_professional_spdif(chip, chip->professional_spdif) < 0) return -EIO; #endif #ifdef ECHOCARD_HAS_PHANTOM_POWER if (set_phantom_power(chip, chip->phantom_power) < 0) return -EIO; #endif #ifdef ECHOCARD_HAS_EXTERNAL_CLOCK /* set_input_clock() also restores automute setting */ if (set_input_clock(chip, chip->input_clock) < 0) return -EIO; #endif #ifdef ECHOCARD_HAS_OUTPUT_CLOCK_SWITCH if (set_output_clock(chip, chip->output_clock) < 0) return -EIO; #endif if (wait_handshake(chip) < 0) return -EIO; clear_handshake(chip); if (send_vector(chip, DSP_VC_UPDATE_FLAGS) < 0) return -EIO; DE_INIT(("restore_dsp_rettings done\n")); return 0; } /**************************************************************************** Transport functions ****************************************************************************/ /* set_audio_format() sets the format of the audio data in host memory for this pipe. 
Note that _MS_ (mono-to-stereo) playback modes are not used by ALSA but they are here because they are just mono while capturing */ static void set_audio_format(struct echoaudio *chip, u16 pipe_index, const struct audioformat *format) { u16 dsp_format; dsp_format = DSP_AUDIOFORM_SS_16LE; /* Look for super-interleave (no big-endian and 8 bits) */ if (format->interleave > 2) { switch (format->bits_per_sample) { case 16: dsp_format = DSP_AUDIOFORM_SUPER_INTERLEAVE_16LE; break; case 24: dsp_format = DSP_AUDIOFORM_SUPER_INTERLEAVE_24LE; break; case 32: dsp_format = DSP_AUDIOFORM_SUPER_INTERLEAVE_32LE; break; } dsp_format |= format->interleave; } else if (format->data_are_bigendian) { /* For big-endian data, only 32 bit samples are supported */ switch (format->interleave) { case 1: dsp_format = DSP_AUDIOFORM_MM_32BE; break; #ifdef ECHOCARD_HAS_STEREO_BIG_ENDIAN32 case 2: dsp_format = DSP_AUDIOFORM_SS_32BE; break; #endif } } else if (format->interleave == 1 && format->bits_per_sample == 32 && !format->mono_to_stereo) { /* 32 bit little-endian mono->mono case */ dsp_format = DSP_AUDIOFORM_MM_32LE; } else { /* Handle the other little-endian formats */ switch (format->bits_per_sample) { case 8: if (format->interleave == 2) dsp_format = DSP_AUDIOFORM_SS_8; else dsp_format = DSP_AUDIOFORM_MS_8; break; default: case 16: if (format->interleave == 2) dsp_format = DSP_AUDIOFORM_SS_16LE; else dsp_format = DSP_AUDIOFORM_MS_16LE; break; case 24: if (format->interleave == 2) dsp_format = DSP_AUDIOFORM_SS_24LE; else dsp_format = DSP_AUDIOFORM_MS_24LE; break; case 32: if (format->interleave == 2) dsp_format = DSP_AUDIOFORM_SS_32LE; else dsp_format = DSP_AUDIOFORM_MS_32LE; break; } } DE_ACT(("set_audio_format[%d] = %x\n", pipe_index, dsp_format)); chip->comm_page->audio_format[pipe_index] = cpu_to_le16(dsp_format); } /* start_transport starts transport for a set of pipes. The bits 1 in channel_mask specify what pipes to start. 
Only the bit of the first channel must be set, regardless its interleave. Same thing for pause_ and stop_ -trasport below. */ static int start_transport(struct echoaudio *chip, u32 channel_mask, u32 cyclic_mask) { DE_ACT(("start_transport %x\n", channel_mask)); if (wait_handshake(chip)) return -EIO; chip->comm_page->cmd_start |= cpu_to_le32(channel_mask); if (chip->comm_page->cmd_start) { clear_handshake(chip); send_vector(chip, DSP_VC_START_TRANSFER); if (wait_handshake(chip)) return -EIO; /* Keep track of which pipes are transporting */ chip->active_mask |= channel_mask; chip->comm_page->cmd_start = 0; return 0; } DE_ACT(("start_transport: No pipes to start!\n")); return -EINVAL; } static int pause_transport(struct echoaudio *chip, u32 channel_mask) { DE_ACT(("pause_transport %x\n", channel_mask)); if (wait_handshake(chip)) return -EIO; chip->comm_page->cmd_stop |= cpu_to_le32(channel_mask); chip->comm_page->cmd_reset = 0; if (chip->comm_page->cmd_stop) { clear_handshake(chip); send_vector(chip, DSP_VC_STOP_TRANSFER); if (wait_handshake(chip)) return -EIO; /* Keep track of which pipes are transporting */ chip->active_mask &= ~channel_mask; chip->comm_page->cmd_stop = 0; chip->comm_page->cmd_reset = 0; return 0; } DE_ACT(("pause_transport: No pipes to stop!\n")); return 0; } static int stop_transport(struct echoaudio *chip, u32 channel_mask) { DE_ACT(("stop_transport %x\n", channel_mask)); if (wait_handshake(chip)) return -EIO; chip->comm_page->cmd_stop |= cpu_to_le32(channel_mask); chip->comm_page->cmd_reset |= cpu_to_le32(channel_mask); if (chip->comm_page->cmd_reset) { clear_handshake(chip); send_vector(chip, DSP_VC_STOP_TRANSFER); if (wait_handshake(chip)) return -EIO; /* Keep track of which pipes are transporting */ chip->active_mask &= ~channel_mask; chip->comm_page->cmd_stop = 0; chip->comm_page->cmd_reset = 0; return 0; } DE_ACT(("stop_transport: No pipes to stop!\n")); return 0; } static inline int is_pipe_allocated(struct echoaudio *chip, u16 pipe_index) 
{ return (chip->pipe_alloc_mask & (1 << pipe_index)); } /* Stops everything and turns off the DSP. All pipes should be already stopped and unallocated. */ static int rest_in_peace(struct echoaudio *chip) { DE_ACT(("rest_in_peace() open=%x\n", chip->pipe_alloc_mask)); /* Stops all active pipes (just to be sure) */ stop_transport(chip, chip->active_mask); set_meters_on(chip, FALSE); #ifdef ECHOCARD_HAS_MIDI enable_midi_input(chip, FALSE); #endif /* Go to sleep */ if (chip->dsp_code) { /* Make load_firmware do a complete reload */ chip->dsp_code = NULL; /* Put the DSP to sleep */ return send_vector(chip, DSP_VC_GO_COMATOSE); } return 0; } /* Fills the comm page with default values */ static int init_dsp_comm_page(struct echoaudio *chip) { /* Check if the compiler added extra padding inside the structure */ if (offsetof(struct comm_page, midi_output) != 0xbe0) { DE_INIT(("init_dsp_comm_page() - Invalid struct comm_page structure\n")); return -EPERM; } /* Init all the basic stuff */ chip->card_name = ECHOCARD_NAME; chip->bad_board = TRUE; /* Set TRUE until DSP loaded */ chip->dsp_code = NULL; /* Current DSP code not loaded */ chip->asic_loaded = FALSE; memset(chip->comm_page, 0, sizeof(struct comm_page)); /* Init the comm page */ chip->comm_page->comm_size = cpu_to_le32(sizeof(struct comm_page)); chip->comm_page->handshake = 0xffffffff; chip->comm_page->midi_out_free_count = cpu_to_le32(DSP_MIDI_OUT_FIFO_SIZE); chip->comm_page->sample_rate = cpu_to_le32(44100); /* Set line levels so we don't blast any inputs on startup */ memset(chip->comm_page->monitors, ECHOGAIN_MUTED, MONITOR_ARRAY_SIZE); memset(chip->comm_page->vmixer, ECHOGAIN_MUTED, VMIXER_ARRAY_SIZE); return 0; } /* This function initializes the chip structure with default values, ie. all * muted and internal clock source. Then it copies the settings to the DSP. * This MUST be called after the DSP is up and running ! 
*/ static int init_line_levels(struct echoaudio *chip) { DE_INIT(("init_line_levels\n")); memset(chip->output_gain, ECHOGAIN_MUTED, sizeof(chip->output_gain)); memset(chip->input_gain, ECHOGAIN_MUTED, sizeof(chip->input_gain)); memset(chip->monitor_gain, ECHOGAIN_MUTED, sizeof(chip->monitor_gain)); memset(chip->vmixer_gain, ECHOGAIN_MUTED, sizeof(chip->vmixer_gain)); chip->input_clock = ECHO_CLOCK_INTERNAL; chip->output_clock = ECHO_CLOCK_WORD; chip->sample_rate = 44100; return restore_dsp_rettings(chip); } /* This is low level part of the interrupt handler. It returns -1 if the IRQ is not ours, or N>=0 if it is, where N is the number of midi data in the input queue. */ static int service_irq(struct echoaudio *chip) { int st; /* Read the DSP status register and see if this DSP generated this interrupt */ if (get_dsp_register(chip, CHI32_STATUS_REG) & CHI32_STATUS_IRQ) { st = 0; #ifdef ECHOCARD_HAS_MIDI /* Get and parse midi data if present */ if (chip->comm_page->midi_input[0]) /* The count is at index 0 */ st = midi_service_irq(chip); /* Returns how many midi bytes we received */ #endif /* Clear the hardware interrupt */ chip->comm_page->midi_input[0] = 0; send_vector(chip, DSP_VC_ACK_INT); return st; } return -1; } /****************************************************************************** Functions for opening and closing pipes ******************************************************************************/ /* allocate_pipes is used to reserve audio pipes for your exclusive use. The call will fail if some pipes are already allocated. 
*/ static int allocate_pipes(struct echoaudio *chip, struct audiopipe *pipe, int pipe_index, int interleave) { int i; u32 channel_mask; char is_cyclic; DE_ACT(("allocate_pipes: ch=%d int=%d\n", pipe_index, interleave)); if (chip->bad_board) return -EIO; is_cyclic = 1; /* This driver uses cyclic buffers only */ for (channel_mask = i = 0; i < interleave; i++) channel_mask |= 1 << (pipe_index + i); if (chip->pipe_alloc_mask & channel_mask) { DE_ACT(("allocate_pipes: channel already open\n")); return -EAGAIN; } chip->comm_page->position[pipe_index] = 0; chip->pipe_alloc_mask |= channel_mask; if (is_cyclic) chip->pipe_cyclic_mask |= channel_mask; pipe->index = pipe_index; pipe->interleave = interleave; pipe->state = PIPE_STATE_STOPPED; /* The counter register is where the DSP writes the 32 bit DMA position for a pipe. The DSP is constantly updating this value as it moves data. The DMA counter is in units of bytes, not samples. */ pipe->dma_counter = &chip->comm_page->position[pipe_index]; *pipe->dma_counter = 0; DE_ACT(("allocate_pipes: ok\n")); return pipe_index; } static int free_pipes(struct echoaudio *chip, struct audiopipe *pipe) { u32 channel_mask; int i; DE_ACT(("free_pipes: Pipe %d\n", pipe->index)); if (snd_BUG_ON(!is_pipe_allocated(chip, pipe->index))) return -EINVAL; if (snd_BUG_ON(pipe->state != PIPE_STATE_STOPPED)) return -EINVAL; for (channel_mask = i = 0; i < pipe->interleave; i++) channel_mask |= 1 << (pipe->index + i); chip->pipe_alloc_mask &= ~channel_mask; chip->pipe_cyclic_mask &= ~channel_mask; return 0; } /****************************************************************************** Functions for managing the scatter-gather list ******************************************************************************/ static int sglist_init(struct echoaudio *chip, struct audiopipe *pipe) { pipe->sglist_head = 0; memset(pipe->sgpage.area, 0, PAGE_SIZE); chip->comm_page->sglist_addr[pipe->index].addr = cpu_to_le32(pipe->sgpage.addr); return 0; } static int 
sglist_add_mapping(struct echoaudio *chip, struct audiopipe *pipe, dma_addr_t address, size_t length) { int head = pipe->sglist_head; struct sg_entry *list = (struct sg_entry *)pipe->sgpage.area; if (head < MAX_SGLIST_ENTRIES - 1) { list[head].addr = cpu_to_le32(address); list[head].size = cpu_to_le32(length); pipe->sglist_head++; } else { DE_ACT(("SGlist: too many fragments\n")); return -ENOMEM; } return 0; } static inline int sglist_add_irq(struct echoaudio *chip, struct audiopipe *pipe) { return sglist_add_mapping(chip, pipe, 0, 0); } static inline int sglist_wrap(struct echoaudio *chip, struct audiopipe *pipe) { return sglist_add_mapping(chip, pipe, pipe->sgpage.addr, 0); }
gpl-2.0
boa19861105/Butterfly-S-Sense-4.4.3
fs/nfsd/auth.c
9128
2038
/* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/sched.h> #include "nfsd.h" #include "auth.h" int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp) { struct exp_flavor_info *f; struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors; for (f = exp->ex_flavors; f < end; f++) { if (f->pseudoflavor == rqstp->rq_flavor) return f->flags; } return exp->ex_flags; } int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) { struct group_info *rqgi; struct group_info *gi; struct cred *new; int i; int flags = nfsexp_flags(rqstp, exp); int ret; validate_process_creds(); /* discard any old override before preparing the new set */ revert_creds(get_cred(current->real_cred)); new = prepare_creds(); if (!new) return -ENOMEM; new->fsuid = rqstp->rq_cred.cr_uid; new->fsgid = rqstp->rq_cred.cr_gid; rqgi = rqstp->rq_cred.cr_group_info; if (flags & NFSEXP_ALLSQUASH) { new->fsuid = exp->ex_anon_uid; new->fsgid = exp->ex_anon_gid; gi = groups_alloc(0); if (!gi) goto oom; } else if (flags & NFSEXP_ROOTSQUASH) { if (!new->fsuid) new->fsuid = exp->ex_anon_uid; if (!new->fsgid) new->fsgid = exp->ex_anon_gid; gi = groups_alloc(rqgi->ngroups); if (!gi) goto oom; for (i = 0; i < rqgi->ngroups; i++) { if (!GROUP_AT(rqgi, i)) GROUP_AT(gi, i) = exp->ex_anon_gid; else GROUP_AT(gi, i) = GROUP_AT(rqgi, i); } } else { gi = get_group_info(rqgi); } if (new->fsuid == (uid_t) -1) new->fsuid = exp->ex_anon_uid; if (new->fsgid == (gid_t) -1) new->fsgid = exp->ex_anon_gid; ret = set_groups(new, gi); put_group_info(gi); if (ret < 0) goto error; if (new->fsuid) new->cap_effective = cap_drop_nfsd_set(new->cap_effective); else new->cap_effective = cap_raise_nfsd_set(new->cap_effective, new->cap_permitted); validate_process_creds(); put_cred(override_creds(new)); put_cred(new); validate_process_creds(); return 0; oom: ret = -ENOMEM; error: abort_creds(new); return ret; }
gpl-2.0
dexter93/kernel_htc_msm8660_old
arch/powerpc/math-emu/fdiv.c
13736
1157
#include <linux/types.h> #include <linux/errno.h> #include <asm/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fdiv(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) { FP_SET_EXCEPTION(EFLAG_VXZDZ); #ifdef DEBUG printk("%s: FPSCR_VXZDZ raised\n", __func__); #endif } if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) { FP_SET_EXCEPTION(EFLAG_VXIDI); #ifdef DEBUG printk("%s: FPSCR_VXIDI raised\n", __func__); #endif } if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) { FP_SET_EXCEPTION(EFLAG_DIVZERO); if (__FPU_TRAP_P(EFLAG_DIVZERO)) return FP_CUR_EXCEPTIONS; } FP_DIV_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
gpl-2.0
luckpizza/n8000-kernel-aufs
arch/powerpc/math-emu/fnmsub.c
13736
1161
#include <linux/types.h> #include <linux/errno.h> #include <asm/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fnmsub(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (B_c != FP_CLS_NAN) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); if (R_c != FP_CLS_NAN) R_s ^= 1; #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
gpl-2.0
ricardon/omap-audio
arch/powerpc/math-emu/fnmsubs.c
13736
1192
#include <linux/types.h> #include <linux/errno.h> #include <asm/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fnmsubs(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (B_c != FP_CLS_NAN) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); if (R_c != FP_CLS_NAN) R_s ^= 1; #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
gpl-2.0
omnirom/android_kernel_samsung_t1
arch/powerpc/math-emu/fnmadd.c
13736
1138
#include <linux/types.h> #include <linux/errno.h> #include <asm/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fnmadd(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); if (R_c != FP_CLS_NAN) R_s ^= 1; #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
gpl-2.0
vidyaravipati/net-next-rocker
fs/nfs/blocklayout/blocklayout.c
169
38253
/* * linux/fs/nfs/blocklayout/blocklayout.c * * Module for the NFSv4.1 pNFS block layout driver. * * Copyright (c) 2006 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@citi.umich.edu> * Fred Isaman <iisaman@umich.edu> * * permission is granted to use, copy, create derivative works and * redistribute this software and such derivative works for any purpose, * so long as the name of the university of michigan is not used in * any advertising or publicity pertaining to the use or distribution * of this software without specific, written prior authorization. if * the above copyright notice or any other identification of the * university of michigan is included in any copy of any portion of * this software, then the disclaimer below must also be included. * * this software is provided as is, without representation from the * university of michigan as to its fitness for any purpose, and without * warranty by the university of michigan of any kind, either express * or implied, including without limitation the implied warranties of * merchantability and fitness for a particular purpose. the regents * of the university of michigan shall not be liable for any damages, * including special, indirect, incidental, or consequential damages, * with respect to any claim arising out or in connection with the use * of the software, even if it has been or is hereafter advised of the * possibility of such damages. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/bio.h> /* struct bio */ #include <linux/buffer_head.h> /* various write calls */ #include <linux/prefetch.h> #include <linux/pagevec.h> #include "../pnfs.h" #include "../nfs4session.h" #include "../internal.h" #include "blocklayout.h" #define NFSDBG_FACILITY NFSDBG_PNFS_LD MODULE_LICENSE("GPL"); MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>"); MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver"); static void print_page(struct page *page) { dprintk("PRINTPAGE page %p\n", page); dprintk(" PagePrivate %d\n", PagePrivate(page)); dprintk(" PageUptodate %d\n", PageUptodate(page)); dprintk(" PageError %d\n", PageError(page)); dprintk(" PageDirty %d\n", PageDirty(page)); dprintk(" PageReferenced %d\n", PageReferenced(page)); dprintk(" PageLocked %d\n", PageLocked(page)); dprintk(" PageWriteback %d\n", PageWriteback(page)); dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page)); dprintk("\n"); } /* Given the be associated with isect, determine if page data needs to be * initialized. */ static int is_hole(struct pnfs_block_extent *be, sector_t isect) { if (be->be_state == PNFS_BLOCK_NONE_DATA) return 1; else if (be->be_state != PNFS_BLOCK_INVALID_DATA) return 0; else return !bl_is_sector_init(be->be_inval, isect); } /* Given the be associated with isect, determine if page data can be * written to disk. */ static int is_writable(struct pnfs_block_extent *be, sector_t isect) { return (be->be_state == PNFS_BLOCK_READWRITE_DATA || be->be_state == PNFS_BLOCK_INVALID_DATA); } /* The data we are handed might be spread across several bios. We need * to track when the last one is finished. 
*/ struct parallel_io { struct kref refcnt; void (*pnfs_callback) (void *data, int num_se); void *data; int bse_count; }; static inline struct parallel_io *alloc_parallel(void *data) { struct parallel_io *rv; rv = kmalloc(sizeof(*rv), GFP_NOFS); if (rv) { rv->data = data; kref_init(&rv->refcnt); rv->bse_count = 0; } return rv; } static inline void get_parallel(struct parallel_io *p) { kref_get(&p->refcnt); } static void destroy_parallel(struct kref *kref) { struct parallel_io *p = container_of(kref, struct parallel_io, refcnt); dprintk("%s enter\n", __func__); p->pnfs_callback(p->data, p->bse_count); kfree(p); } static inline void put_parallel(struct parallel_io *p) { kref_put(&p->refcnt, destroy_parallel); } static struct bio * bl_submit_bio(int rw, struct bio *bio) { if (bio) { get_parallel(bio->bi_private); dprintk("%s submitting %s bio %u@%llu\n", __func__, rw == READ ? "read" : "write", bio->bi_iter.bi_size, (unsigned long long)bio->bi_iter.bi_sector); submit_bio(rw, bio); } return NULL; } static struct bio *bl_alloc_init_bio(int npg, sector_t isect, struct pnfs_block_extent *be, void (*end_io)(struct bio *, int err), struct parallel_io *par) { struct bio *bio; npg = min(npg, BIO_MAX_PAGES); bio = bio_alloc(GFP_NOIO, npg); if (!bio && (current->flags & PF_MEMALLOC)) { while (!bio && (npg /= 2)) bio = bio_alloc(GFP_NOIO, npg); } if (bio) { bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset; bio->bi_bdev = be->be_mdev; bio->bi_end_io = end_io; bio->bi_private = par; } return bio; } static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, struct page *page, struct pnfs_block_extent *be, void (*end_io)(struct bio *, int err), struct parallel_io *par, unsigned int offset, int len) { isect = isect + (offset >> SECTOR_SHIFT); dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__, npg, rw, (unsigned long long)isect, offset, len); retry: if (!bio) { bio = bl_alloc_init_bio(npg, isect, be, end_io, par); if 
(!bio) return ERR_PTR(-ENOMEM); } if (bio_add_page(bio, page, len, offset) < len) { bio = bl_submit_bio(rw, bio); goto retry; } return bio; } static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, struct page *page, struct pnfs_block_extent *be, void (*end_io)(struct bio *, int err), struct parallel_io *par) { return do_add_page_to_bio(bio, npg, rw, isect, page, be, end_io, par, 0, PAGE_CACHE_SIZE); } /* This is basically copied from mpage_end_io_read */ static void bl_end_io_read(struct bio *bio, int err) { struct parallel_io *par = bio->bi_private; struct bio_vec *bvec; int i; if (!err) bio_for_each_segment_all(bvec, bio, i) SetPageUptodate(bvec->bv_page); if (err) { struct nfs_read_data *rdata = par->data; struct nfs_pgio_header *header = rdata->header; if (!header->pnfs_error) header->pnfs_error = -EIO; pnfs_set_lo_fail(header->lseg); } bio_put(bio); put_parallel(par); } static void bl_read_cleanup(struct work_struct *work) { struct rpc_task *task; struct nfs_read_data *rdata; dprintk("%s enter\n", __func__); task = container_of(work, struct rpc_task, u.tk_work); rdata = container_of(task, struct nfs_read_data, task); pnfs_ld_read_done(rdata); } static void bl_end_par_io_read(void *data, int unused) { struct nfs_read_data *rdata = data; rdata->task.tk_status = rdata->header->pnfs_error; INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup); schedule_work(&rdata->task.u.tk_work); } static enum pnfs_try_status bl_read_pagelist(struct nfs_read_data *rdata) { struct nfs_pgio_header *header = rdata->header; int i, hole; struct bio *bio = NULL; struct pnfs_block_extent *be = NULL, *cow_read = NULL; sector_t isect, extent_length = 0; struct parallel_io *par; loff_t f_offset = rdata->args.offset; size_t bytes_left = rdata->args.count; unsigned int pg_offset, pg_len; struct page **pages = rdata->args.pages; int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT; const bool is_dio = (header->dreq != NULL); dprintk("%s enter nr_pages %u offset 
%lld count %u\n", __func__, rdata->pages.npages, f_offset, (unsigned int)rdata->args.count); par = alloc_parallel(rdata); if (!par) goto use_mds; par->pnfs_callback = bl_end_par_io_read; /* At this point, we can no longer jump to use_mds */ isect = (sector_t) (f_offset >> SECTOR_SHIFT); /* Code assumes extents are page-aligned */ for (i = pg_index; i < rdata->pages.npages; i++) { if (!extent_length) { /* We've used up the previous extent */ bl_put_extent(be); bl_put_extent(cow_read); bio = bl_submit_bio(READ, bio); /* Get the next one */ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read); if (!be) { header->pnfs_error = -EIO; goto out; } extent_length = be->be_length - (isect - be->be_f_offset); if (cow_read) { sector_t cow_length = cow_read->be_length - (isect - cow_read->be_f_offset); extent_length = min(extent_length, cow_length); } } if (is_dio) { pg_offset = f_offset & ~PAGE_CACHE_MASK; if (pg_offset + bytes_left > PAGE_CACHE_SIZE) pg_len = PAGE_CACHE_SIZE - pg_offset; else pg_len = bytes_left; f_offset += pg_len; bytes_left -= pg_len; isect += (pg_offset >> SECTOR_SHIFT); } else { pg_offset = 0; pg_len = PAGE_CACHE_SIZE; } hole = is_hole(be, isect); if (hole && !cow_read) { bio = bl_submit_bio(READ, bio); /* Fill hole w/ zeroes w/o accessing device */ dprintk("%s Zeroing page for hole\n", __func__); zero_user_segment(pages[i], pg_offset, pg_len); print_page(pages[i]); SetPageUptodate(pages[i]); } else { struct pnfs_block_extent *be_read; be_read = (hole && cow_read) ? 
cow_read : be; bio = do_add_page_to_bio(bio, rdata->pages.npages - i, READ, isect, pages[i], be_read, bl_end_io_read, par, pg_offset, pg_len); if (IS_ERR(bio)) { header->pnfs_error = PTR_ERR(bio); bio = NULL; goto out; } } isect += (pg_len >> SECTOR_SHIFT); extent_length -= PAGE_CACHE_SECTORS; } if ((isect << SECTOR_SHIFT) >= header->inode->i_size) { rdata->res.eof = 1; rdata->res.count = header->inode->i_size - rdata->args.offset; } else { rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset; } out: bl_put_extent(be); bl_put_extent(cow_read); bl_submit_bio(READ, bio); put_parallel(par); return PNFS_ATTEMPTED; use_mds: dprintk("Giving up and using normal NFS\n"); return PNFS_NOT_ATTEMPTED; } static void mark_extents_written(struct pnfs_block_layout *bl, __u64 offset, __u32 count) { sector_t isect, end; struct pnfs_block_extent *be; struct pnfs_block_short_extent *se; dprintk("%s(%llu, %u)\n", __func__, offset, count); if (count == 0) return; isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT; end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK); end >>= SECTOR_SHIFT; while (isect < end) { sector_t len; be = bl_find_get_extent(bl, isect, NULL); BUG_ON(!be); /* FIXME */ len = min(end, be->be_f_offset + be->be_length) - isect; if (be->be_state == PNFS_BLOCK_INVALID_DATA) { se = bl_pop_one_short_extent(be->be_inval); BUG_ON(!se); bl_mark_for_commit(be, isect, len, se); } isect += len; bl_put_extent(be); } } static void bl_end_io_write_zero(struct bio *bio, int err) { struct parallel_io *par = bio->bi_private; struct bio_vec *bvec; int i; bio_for_each_segment_all(bvec, bio, i) { /* This is the zeroing page we added */ end_page_writeback(bvec->bv_page); page_cache_release(bvec->bv_page); } if (unlikely(err)) { struct nfs_write_data *data = par->data; struct nfs_pgio_header *header = data->header; if (!header->pnfs_error) header->pnfs_error = -EIO; pnfs_set_lo_fail(header->lseg); } bio_put(bio); put_parallel(par); } static void 
bl_end_io_write(struct bio *bio, int err) { struct parallel_io *par = bio->bi_private; const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct nfs_write_data *data = par->data; struct nfs_pgio_header *header = data->header; if (!uptodate) { if (!header->pnfs_error) header->pnfs_error = -EIO; pnfs_set_lo_fail(header->lseg); } bio_put(bio); put_parallel(par); } /* Function scheduled for call during bl_end_par_io_write, * it marks sectors as written and extends the commitlist. */ static void bl_write_cleanup(struct work_struct *work) { struct rpc_task *task; struct nfs_write_data *wdata; dprintk("%s enter\n", __func__); task = container_of(work, struct rpc_task, u.tk_work); wdata = container_of(task, struct nfs_write_data, task); if (likely(!wdata->header->pnfs_error)) { /* Marks for LAYOUTCOMMIT */ mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg), wdata->args.offset, wdata->args.count); } pnfs_ld_write_done(wdata); } /* Called when last of bios associated with a bl_write_pagelist call finishes */ static void bl_end_par_io_write(void *data, int num_se) { struct nfs_write_data *wdata = data; if (unlikely(wdata->header->pnfs_error)) { bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval, num_se); } wdata->task.tk_status = wdata->header->pnfs_error; wdata->verf.committed = NFS_FILE_SYNC; INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup); schedule_work(&wdata->task.u.tk_work); } /* FIXME STUB - mark intersection of layout and page as bad, so is not * used again. 
*/ static void mark_bad_read(void) { return; } /* * map_block: map a requested I/0 block (isect) into an offset in the LVM * block_device */ static void map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be) { dprintk("%s enter be=%p\n", __func__, be); set_buffer_mapped(bh); bh->b_bdev = be->be_mdev; bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >> (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT); dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n", __func__, (unsigned long long)isect, (long)bh->b_blocknr, bh->b_size); return; } static void bl_read_single_end_io(struct bio *bio, int error) { struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct page *page = bvec->bv_page; /* Only one page in bvec */ unlock_page(page); } static int bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be, unsigned int offset, unsigned int len) { struct bio *bio; struct page *shadow_page; sector_t isect; char *kaddr, *kshadow_addr; int ret = 0; dprintk("%s: offset %u len %u\n", __func__, offset, len); shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (shadow_page == NULL) return -ENOMEM; bio = bio_alloc(GFP_NOIO, 1); if (bio == NULL) return -ENOMEM; isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) + (offset / SECTOR_SIZE); bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset; bio->bi_bdev = be->be_mdev; bio->bi_end_io = bl_read_single_end_io; lock_page(shadow_page); if (bio_add_page(bio, shadow_page, SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) { unlock_page(shadow_page); bio_put(bio); return -EIO; } submit_bio(READ, bio); wait_on_page_locked(shadow_page); if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) { ret = -EIO; } else { kaddr = kmap_atomic(page); kshadow_addr = kmap_atomic(shadow_page); memcpy(kaddr + offset, kshadow_addr + offset, len); kunmap_atomic(kshadow_addr); kunmap_atomic(kaddr); } __free_page(shadow_page); bio_put(bio); return ret; } static int 
bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be, unsigned int dirty_offset, unsigned int dirty_len, bool full_page) { int ret = 0; unsigned int start, end; if (full_page) { start = 0; end = PAGE_CACHE_SIZE; } else { start = round_down(dirty_offset, SECTOR_SIZE); end = round_up(dirty_offset + dirty_len, SECTOR_SIZE); } dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len); if (!be) { zero_user_segments(page, start, dirty_offset, dirty_offset + dirty_len, end); if (start == 0 && end == PAGE_CACHE_SIZE && trylock_page(page)) { SetPageUptodate(page); unlock_page(page); } return ret; } if (start != dirty_offset) ret = bl_do_readpage_sync(page, be, start, dirty_offset - start); if (!ret && (dirty_offset + dirty_len < end)) ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len, end - dirty_offset - dirty_len); return ret; } /* Given an unmapped page, zero it or read in page for COW, page is locked * by caller. */ static int init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read) { struct buffer_head *bh = NULL; int ret = 0; sector_t isect; dprintk("%s enter, %p\n", __func__, page); BUG_ON(PageUptodate(page)); if (!cow_read) { zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); goto cleanup; } bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0); if (!bh) { ret = -ENOMEM; goto cleanup; } isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT; map_block(bh, isect, cow_read); if (!bh_uptodate_or_lock(bh)) ret = bh_submit_read(bh); if (ret) goto cleanup; SetPageUptodate(page); cleanup: if (bh) free_buffer_head(bh); if (ret) { /* Need to mark layout with bad read...should now * just use nfs4 for reads and writes. */ mark_bad_read(); } return ret; } /* Find or create a zeroing page marked being writeback. * Return ERR_PTR on error, NULL to indicate skip this page and page itself * to indicate write out. 
*/ static struct page * bl_find_get_zeroing_page(struct inode *inode, pgoff_t index, struct pnfs_block_extent *cow_read) { struct page *page; int locked = 0; page = find_get_page(inode->i_mapping, index); if (page) goto check_page; page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); if (unlikely(!page)) { dprintk("%s oom\n", __func__); return ERR_PTR(-ENOMEM); } locked = 1; check_page: /* PageDirty: Other will write this out * PageWriteback: Other is writing this out * PageUptodate: It was read before */ if (PageDirty(page) || PageWriteback(page)) { print_page(page); if (locked) unlock_page(page); page_cache_release(page); return NULL; } if (!locked) { lock_page(page); locked = 1; goto check_page; } if (!PageUptodate(page)) { /* New page, readin or zero it */ init_page_for_write(page, cow_read); } set_page_writeback(page); unlock_page(page); return page; } static enum pnfs_try_status bl_write_pagelist(struct nfs_write_data *wdata, int sync) { struct nfs_pgio_header *header = wdata->header; int i, ret, npg_zero, pg_index, last = 0; struct bio *bio = NULL; struct pnfs_block_extent *be = NULL, *cow_read = NULL; sector_t isect, last_isect = 0, extent_length = 0; struct parallel_io *par = NULL; loff_t offset = wdata->args.offset; size_t count = wdata->args.count; unsigned int pg_offset, pg_len, saved_len; struct page **pages = wdata->args.pages; struct page *page; pgoff_t index; u64 temp; int npg_per_block = NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT; dprintk("%s enter, %Zu@%lld\n", __func__, count, offset); if (header->dreq != NULL && (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) || !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) { dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n"); goto out_mds; } /* At this point, wdata->pages is a (sequential) list of nfs_pages. * We want to write each, and if there is an error set pnfs_error * to have it redone using nfs. 
*/ par = alloc_parallel(wdata); if (!par) goto out_mds; par->pnfs_callback = bl_end_par_io_write; /* At this point, have to be more careful with error handling */ isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read); if (!be || !is_writable(be, isect)) { dprintk("%s no matching extents!\n", __func__); goto out_mds; } /* First page inside INVALID extent */ if (be->be_state == PNFS_BLOCK_INVALID_DATA) { if (likely(!bl_push_one_short_extent(be->be_inval))) par->bse_count++; else goto out_mds; temp = offset >> PAGE_CACHE_SHIFT; npg_zero = do_div(temp, npg_per_block); isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); extent_length = be->be_length - (isect - be->be_f_offset); fill_invalid_ext: dprintk("%s need to zero %d pages\n", __func__, npg_zero); for (;npg_zero > 0; npg_zero--) { if (bl_is_sector_init(be->be_inval, isect)) { dprintk("isect %llu already init\n", (unsigned long long)isect); goto next_page; } /* page ref released in bl_end_io_write_zero */ index = isect >> PAGE_CACHE_SECTOR_SHIFT; dprintk("%s zero %dth page: index %lu isect %llu\n", __func__, npg_zero, index, (unsigned long long)isect); page = bl_find_get_zeroing_page(header->inode, index, cow_read); if (unlikely(IS_ERR(page))) { header->pnfs_error = PTR_ERR(page); goto out; } else if (page == NULL) goto next_page; ret = bl_mark_sectors_init(be->be_inval, isect, PAGE_CACHE_SECTORS); if (unlikely(ret)) { dprintk("%s bl_mark_sectors_init fail %d\n", __func__, ret); end_page_writeback(page); page_cache_release(page); header->pnfs_error = ret; goto out; } if (likely(!bl_push_one_short_extent(be->be_inval))) par->bse_count++; else { end_page_writeback(page); page_cache_release(page); header->pnfs_error = -ENOMEM; goto out; } /* FIXME: This should be done in bi_end_io */ mark_extents_written(BLK_LSEG2EXT(header->lseg), page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); 
bio = bl_add_page_to_bio(bio, npg_zero, WRITE, isect, page, be, bl_end_io_write_zero, par); if (IS_ERR(bio)) { header->pnfs_error = PTR_ERR(bio); bio = NULL; goto out; } next_page: isect += PAGE_CACHE_SECTORS; extent_length -= PAGE_CACHE_SECTORS; } if (last) goto write_done; } bio = bl_submit_bio(WRITE, bio); /* Middle pages */ pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT; for (i = pg_index; i < wdata->pages.npages; i++) { if (!extent_length) { /* We've used up the previous extent */ bl_put_extent(be); bl_put_extent(cow_read); bio = bl_submit_bio(WRITE, bio); /* Get the next one */ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read); if (!be || !is_writable(be, isect)) { header->pnfs_error = -EINVAL; goto out; } if (be->be_state == PNFS_BLOCK_INVALID_DATA) { if (likely(!bl_push_one_short_extent( be->be_inval))) par->bse_count++; else { header->pnfs_error = -ENOMEM; goto out; } } extent_length = be->be_length - (isect - be->be_f_offset); } dprintk("%s offset %lld count %Zu\n", __func__, offset, count); pg_offset = offset & ~PAGE_CACHE_MASK; if (pg_offset + count > PAGE_CACHE_SIZE) pg_len = PAGE_CACHE_SIZE - pg_offset; else pg_len = count; saved_len = pg_len; if (be->be_state == PNFS_BLOCK_INVALID_DATA && !bl_is_sector_init(be->be_inval, isect)) { ret = bl_read_partial_page_sync(pages[i], cow_read, pg_offset, pg_len, true); if (ret) { dprintk("%s bl_read_partial_page_sync fail %d\n", __func__, ret); header->pnfs_error = ret; goto out; } ret = bl_mark_sectors_init(be->be_inval, isect, PAGE_CACHE_SECTORS); if (unlikely(ret)) { dprintk("%s bl_mark_sectors_init fail %d\n", __func__, ret); header->pnfs_error = ret; goto out; } /* Expand to full page write */ pg_offset = 0; pg_len = PAGE_CACHE_SIZE; } else if ((pg_offset & (SECTOR_SIZE - 1)) || (pg_len & (SECTOR_SIZE - 1))){ /* ahh, nasty case. We have to do sync full sector * read-modify-write cycles. 
*/ unsigned int saved_offset = pg_offset; ret = bl_read_partial_page_sync(pages[i], be, pg_offset, pg_len, false); pg_offset = round_down(pg_offset, SECTOR_SIZE); pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE) - pg_offset; } bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE, isect, pages[i], be, bl_end_io_write, par, pg_offset, pg_len); if (IS_ERR(bio)) { header->pnfs_error = PTR_ERR(bio); bio = NULL; goto out; } offset += saved_len; count -= saved_len; isect += PAGE_CACHE_SECTORS; last_isect = isect; extent_length -= PAGE_CACHE_SECTORS; } /* Last page inside INVALID extent */ if (be->be_state == PNFS_BLOCK_INVALID_DATA) { bio = bl_submit_bio(WRITE, bio); temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT; npg_zero = npg_per_block - do_div(temp, npg_per_block); if (npg_zero < npg_per_block) { last = 1; goto fill_invalid_ext; } } write_done: wdata->res.count = wdata->args.count; out: bl_put_extent(be); bl_put_extent(cow_read); bl_submit_bio(WRITE, bio); put_parallel(par); return PNFS_ATTEMPTED; out_mds: bl_put_extent(be); bl_put_extent(cow_read); kfree(par); return PNFS_NOT_ATTEMPTED; } /* FIXME - range ignored */ static void release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range) { int i; struct pnfs_block_extent *be; spin_lock(&bl->bl_ext_lock); for (i = 0; i < EXTENT_LISTS; i++) { while (!list_empty(&bl->bl_extents[i])) { be = list_first_entry(&bl->bl_extents[i], struct pnfs_block_extent, be_node); list_del(&be->be_node); bl_put_extent(be); } } spin_unlock(&bl->bl_ext_lock); } static void release_inval_marks(struct pnfs_inval_markings *marks) { struct pnfs_inval_tracking *pos, *temp; struct pnfs_block_short_extent *se, *stemp; list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) { list_del(&pos->it_link); kfree(pos); } list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) { list_del(&se->bse_node); kfree(se); } return; } static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo) { struct 
pnfs_block_layout *bl = BLK_LO2EXT(lo); dprintk("%s enter\n", __func__); release_extents(bl, NULL); release_inval_marks(&bl->bl_inval); kfree(bl); } static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags) { struct pnfs_block_layout *bl; dprintk("%s enter\n", __func__); bl = kzalloc(sizeof(*bl), gfp_flags); if (!bl) return NULL; spin_lock_init(&bl->bl_ext_lock); INIT_LIST_HEAD(&bl->bl_extents[0]); INIT_LIST_HEAD(&bl->bl_extents[1]); INIT_LIST_HEAD(&bl->bl_commit); INIT_LIST_HEAD(&bl->bl_committing); bl->bl_count = 0; bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT; BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize); return &bl->bl_layout; } static void bl_free_lseg(struct pnfs_layout_segment *lseg) { dprintk("%s enter\n", __func__); kfree(lseg); } /* We pretty much ignore lseg, and store all data layout wide, so we * can correctly merge. */ static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags) { struct pnfs_layout_segment *lseg; int status; dprintk("%s enter\n", __func__); lseg = kzalloc(sizeof(*lseg), gfp_flags); if (!lseg) return ERR_PTR(-ENOMEM); status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags); if (status) { /* We don't want to call the full-blown bl_free_lseg, * since on error extents were not touched. 
*/ kfree(lseg); return ERR_PTR(status); } return lseg; } static void bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr, const struct nfs4_layoutcommit_args *arg) { dprintk("%s enter\n", __func__); encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg); } static void bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata) { struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout; dprintk("%s enter\n", __func__); clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status); } static void free_blk_mountid(struct block_mount_id *mid) { if (mid) { struct pnfs_block_dev *dev, *tmp; /* No need to take bm_lock as we are last user freeing bm_devlist */ list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) { list_del(&dev->bm_node); bl_free_block_dev(dev); } kfree(mid); } } /* This is mostly copied from the filelayout_get_device_info function. * It seems much of this should be at the generic pnfs level. */ static struct pnfs_block_dev * nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh, struct nfs4_deviceid *d_id) { struct pnfs_device *dev; struct pnfs_block_dev *rv; u32 max_resp_sz; int max_pages; struct page **pages = NULL; int i, rc; /* * Use the session max response size as the basis for setting * GETDEVICEINFO's maxcount */ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; max_pages = nfs_page_array_len(0, max_resp_sz); dprintk("%s max_resp_sz %u max_pages %d\n", __func__, max_resp_sz, max_pages); dev = kmalloc(sizeof(*dev), GFP_NOFS); if (!dev) { dprintk("%s kmalloc failed\n", __func__); return ERR_PTR(-ENOMEM); } pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS); if (pages == NULL) { kfree(dev); return ERR_PTR(-ENOMEM); } for (i = 0; i < max_pages; i++) { pages[i] = alloc_page(GFP_NOFS); if (!pages[i]) { rv = ERR_PTR(-ENOMEM); goto out_free; } } memcpy(&dev->dev_id, d_id, sizeof(*d_id)); dev->layout_type = LAYOUT_BLOCK_VOLUME; dev->pages = pages; 
dev->pgbase = 0; dev->pglen = PAGE_SIZE * max_pages; dev->mincount = 0; dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead; dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data); rc = nfs4_proc_getdeviceinfo(server, dev, NULL); dprintk("%s getdevice info returns %d\n", __func__, rc); if (rc) { rv = ERR_PTR(rc); goto out_free; } rv = nfs4_blk_decode_device(server, dev); out_free: for (i = 0; i < max_pages; i++) __free_page(pages[i]); kfree(pages); kfree(dev); return rv; } static int bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh) { struct block_mount_id *b_mt_id = NULL; struct pnfs_devicelist *dlist = NULL; struct pnfs_block_dev *bdev; LIST_HEAD(block_disklist); int status, i; dprintk("%s enter\n", __func__); if (server->pnfs_blksize == 0) { dprintk("%s Server did not return blksize\n", __func__); return -EINVAL; } b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS); if (!b_mt_id) { status = -ENOMEM; goto out_error; } /* Initialize nfs4 block layout mount id */ spin_lock_init(&b_mt_id->bm_lock); INIT_LIST_HEAD(&b_mt_id->bm_devlist); dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS); if (!dlist) { status = -ENOMEM; goto out_error; } dlist->eof = 0; while (!dlist->eof) { status = nfs4_proc_getdevicelist(server, fh, dlist); if (status) goto out_error; dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n", __func__, dlist->num_devs, dlist->eof); for (i = 0; i < dlist->num_devs; i++) { bdev = nfs4_blk_get_deviceinfo(server, fh, &dlist->dev_id[i]); if (IS_ERR(bdev)) { status = PTR_ERR(bdev); goto out_error; } spin_lock(&b_mt_id->bm_lock); list_add(&bdev->bm_node, &b_mt_id->bm_devlist); spin_unlock(&b_mt_id->bm_lock); } } dprintk("%s SUCCESS\n", __func__); server->pnfs_ld_data = b_mt_id; out_return: kfree(dlist); return status; out_error: free_blk_mountid(b_mt_id); goto out_return; } static int bl_clear_layoutdriver(struct nfs_server *server) { struct block_mount_id *b_mt_id = server->pnfs_ld_data; dprintk("%s enter\n", __func__); 
free_blk_mountid(b_mt_id); dprintk("%s RETURNS\n", __func__); return 0; } static bool is_aligned_req(struct nfs_page *req, unsigned int alignment) { return IS_ALIGNED(req->wb_offset, alignment) && IS_ALIGNED(req->wb_bytes, alignment); } static void bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { if (pgio->pg_dreq != NULL && !is_aligned_req(req, SECTOR_SIZE)) nfs_pageio_reset_read_mds(pgio); else pnfs_generic_pg_init_read(pgio, req); } static bool bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req) { if (pgio->pg_dreq != NULL && !is_aligned_req(req, SECTOR_SIZE)) return false; return pnfs_generic_pg_test(pgio, prev, req); } /* * Return the number of contiguous bytes for a given inode * starting at page frame idx. */ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx) { struct address_space *mapping = inode->i_mapping; pgoff_t end; /* Optimize common case that writes from 0 to end of file */ end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); if (end != NFS_I(inode)->npages) { rcu_read_lock(); end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX); rcu_read_unlock(); } if (!end) return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT); else return (end - idx) << PAGE_CACHE_SHIFT; } static void bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { if (pgio->pg_dreq != NULL && !is_aligned_req(req, PAGE_CACHE_SIZE)) { nfs_pageio_reset_write_mds(pgio); } else { u64 wb_size; if (pgio->pg_dreq == NULL) wb_size = pnfs_num_cont_bytes(pgio->pg_inode, req->wb_index); else wb_size = nfs_dreq_bytes_left(pgio->pg_dreq); pnfs_generic_pg_init_write(pgio, req, wb_size); } } static bool bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req) { if (pgio->pg_dreq != NULL && !is_aligned_req(req, PAGE_CACHE_SIZE)) return false; return pnfs_generic_pg_test(pgio, prev, req); } static const struct nfs_pageio_ops bl_pg_read_ops = { .pg_init 
= bl_pg_init_read, .pg_test = bl_pg_test_read, .pg_doio = pnfs_generic_pg_readpages, }; static const struct nfs_pageio_ops bl_pg_write_ops = { .pg_init = bl_pg_init_write, .pg_test = bl_pg_test_write, .pg_doio = pnfs_generic_pg_writepages, }; static struct pnfs_layoutdriver_type blocklayout_type = { .id = LAYOUT_BLOCK_VOLUME, .name = "LAYOUT_BLOCK_VOLUME", .owner = THIS_MODULE, .read_pagelist = bl_read_pagelist, .write_pagelist = bl_write_pagelist, .alloc_layout_hdr = bl_alloc_layout_hdr, .free_layout_hdr = bl_free_layout_hdr, .alloc_lseg = bl_alloc_lseg, .free_lseg = bl_free_lseg, .encode_layoutcommit = bl_encode_layoutcommit, .cleanup_layoutcommit = bl_cleanup_layoutcommit, .set_layoutdriver = bl_set_layoutdriver, .clear_layoutdriver = bl_clear_layoutdriver, .pg_read_ops = &bl_pg_read_ops, .pg_write_ops = &bl_pg_write_ops, }; static const struct rpc_pipe_ops bl_upcall_ops = { .upcall = rpc_pipe_generic_upcall, .downcall = bl_pipe_downcall, .destroy_msg = bl_pipe_destroy_msg, }; static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb, struct rpc_pipe *pipe) { struct dentry *dir, *dentry; dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME); if (dir == NULL) return ERR_PTR(-ENOENT); dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe); dput(dir); return dentry; } static void nfs4blocklayout_unregister_sb(struct super_block *sb, struct rpc_pipe *pipe) { if (pipe->dentry) rpc_unlink(pipe->dentry); } static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct super_block *sb = ptr; struct net *net = sb->s_fs_info; struct nfs_net *nn = net_generic(net, nfs_net_id); struct dentry *dentry; int ret = 0; if (!try_module_get(THIS_MODULE)) return 0; if (nn->bl_device_pipe == NULL) { module_put(THIS_MODULE); return 0; } switch (event) { case RPC_PIPEFS_MOUNT: dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); break; } nn->bl_device_pipe->dentry = dentry; break; case 
RPC_PIPEFS_UMOUNT: if (nn->bl_device_pipe->dentry) nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe); break; default: ret = -ENOTSUPP; break; } module_put(THIS_MODULE); return ret; } static struct notifier_block nfs4blocklayout_block = { .notifier_call = rpc_pipefs_event, }; static struct dentry *nfs4blocklayout_register_net(struct net *net, struct rpc_pipe *pipe) { struct super_block *pipefs_sb; struct dentry *dentry; pipefs_sb = rpc_get_sb_net(net); if (!pipefs_sb) return NULL; dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe); rpc_put_sb_net(net); return dentry; } static void nfs4blocklayout_unregister_net(struct net *net, struct rpc_pipe *pipe) { struct super_block *pipefs_sb; pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { nfs4blocklayout_unregister_sb(pipefs_sb, pipe); rpc_put_sb_net(net); } } static int nfs4blocklayout_net_init(struct net *net) { struct nfs_net *nn = net_generic(net, nfs_net_id); struct dentry *dentry; init_waitqueue_head(&nn->bl_wq); nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); if (IS_ERR(nn->bl_device_pipe)) return PTR_ERR(nn->bl_device_pipe); dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe); if (IS_ERR(dentry)) { rpc_destroy_pipe_data(nn->bl_device_pipe); return PTR_ERR(dentry); } nn->bl_device_pipe->dentry = dentry; return 0; } static void nfs4blocklayout_net_exit(struct net *net) { struct nfs_net *nn = net_generic(net, nfs_net_id); nfs4blocklayout_unregister_net(net, nn->bl_device_pipe); rpc_destroy_pipe_data(nn->bl_device_pipe); nn->bl_device_pipe = NULL; } static struct pernet_operations nfs4blocklayout_net_ops = { .init = nfs4blocklayout_net_init, .exit = nfs4blocklayout_net_exit, }; static int __init nfs4blocklayout_init(void) { int ret; dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__); ret = pnfs_register_layoutdriver(&blocklayout_type); if (ret) goto out; ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block); if (ret) goto out_remove; ret = 
register_pernet_subsys(&nfs4blocklayout_net_ops); if (ret) goto out_notifier; out: return ret; out_notifier: rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); out_remove: pnfs_unregister_layoutdriver(&blocklayout_type); return ret; } static void __exit nfs4blocklayout_exit(void) { dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n", __func__); rpc_pipefs_notifier_unregister(&nfs4blocklayout_block); unregister_pernet_subsys(&nfs4blocklayout_net_ops); pnfs_unregister_layoutdriver(&blocklayout_type); } MODULE_ALIAS("nfs-layouttype4-3"); module_init(nfs4blocklayout_init); module_exit(nfs4blocklayout_exit);
gpl-2.0
smipi1/elce2015-tiny-linux
drivers/net/ethernet/dec/tulip/media.c
2473
16681
/* drivers/net/ethernet/dec/tulip/media.c Copyright 2000,2001 The Linux Kernel Team Written/copyright 1994-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Please submit bugs to http://bugzilla.kernel.org/ . */ #include <linux/kernel.h> #include <linux/mii.h> #include <linux/delay.h> #include <linux/pci.h> #include "tulip.h" /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back PCI I/O cycles, but we insert a delay to avoid "overclocking" issues or future 66Mhz PCI. */ #define mdio_delay() ioread32(mdio_addr) /* Read and write the MII registers using software-generated serial MDIO protocol. It is just different enough from the EEPROM protocol to not share code. The maxium data clock rate is 2.5 Mhz. */ #define MDIO_SHIFT_CLK 0x10000 #define MDIO_DATA_WRITE0 0x00000 #define MDIO_DATA_WRITE1 0x20000 #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */ #define MDIO_ENB_IN 0x40000 #define MDIO_DATA_READ 0x80000 static const unsigned char comet_miireg2offset[32] = { 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0, 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, }; /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions") or DP83840A data sheet for more details. 
*/ int tulip_mdio_read(struct net_device *dev, int phy_id, int location) { struct tulip_private *tp = netdev_priv(dev); int i; int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location; int retval = 0; void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return 0xffff; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) return ioread32(ioaddr + comet_miireg2offset[location]); return 0xffff; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0); ioread32(ioaddr + 0xA0); ioread32(ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return retval & 0xffff; } /* Establish sync by sending at least 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 
1 : 0); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); return (retval>>1) & 0xffff; } void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) { struct tulip_private *tp = netdev_priv(dev); int i; int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); void __iomem *ioaddr = tp->base_addr; void __iomem *mdio_addr = ioaddr + CSR9; unsigned long flags; if (location & ~0x1f) return; if (tp->chip_id == COMET && phy_id == 30) { if (comet_miireg2offset[location]) iowrite32(val, ioaddr + comet_miireg2offset[location]); return; } spin_lock_irqsave(&tp->mii_lock, flags); if (tp->chip_id == LC82C168) { iowrite32(cmd, ioaddr + 0xA0); for (i = 1000; i >= 0; --i) { barrier(); if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) break; } spin_unlock_irqrestore(&tp->mii_lock, flags); return; } /* Establish sync by sending 32 logic ones. */ for (i = 32; i >= 0; i--) { iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; iowrite32(MDIO_ENB | dataval, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } /* Clear out extra bits. */ for (i = 2; i > 0; i--) { iowrite32(MDIO_ENB_IN, mdio_addr); mdio_delay(); iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); mdio_delay(); } spin_unlock_irqrestore(&tp->mii_lock, flags); } /* Set up the transceiver control registers for the selected media type. 
*/ void tulip_select_media(struct net_device *dev, int startup) { struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; struct mediatable *mtable = tp->mtable; u32 new_csr6; int i; if (mtable) { struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index]; unsigned char *p = mleaf->leafdata; switch (mleaf->type) { case 0: /* 21140 non-MII xcvr. */ if (tulip_debug > 1) netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n", p[1]); dev->if_port = p[0]; if (startup) iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); iowrite32(p[1], ioaddr + CSR12); new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18); break; case 2: case 4: { u16 setup[5]; u32 csr13val, csr14val, csr15dir, csr15val; for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); dev->if_port = p[0] & MEDIA_MASK; if (tulip_media_cap[dev->if_port] & MediaAlwaysFD) tp->full_duplex = 1; if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } if (tulip_debug > 1) netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n", medianame[dev->if_port], setup[0], setup[1]); if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. 
*/ csr13val = setup[0]; csr14val = setup[1]; csr15dir = (setup[3]<<16) | setup[2]; csr15val = (setup[4]<<16) | setup[2]; iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ iowrite32(csr13val, ioaddr + CSR13); } else { csr13val = 1; csr14val = 0; csr15dir = (setup[0]<<16) | 0x0008; csr15val = (setup[1]<<16) | 0x0008; if (dev->if_port <= 4) csr14val = t21142_csr14[dev->if_port]; if (startup) { iowrite32(0, ioaddr + CSR13); iowrite32(csr14val, ioaddr + CSR14); } iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ iowrite32(csr15val, ioaddr + CSR15); /* Data */ if (startup) iowrite32(csr13val, ioaddr + CSR13); } if (tulip_debug > 1) netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n", csr15dir, csr15val); if (mleaf->type == 4) new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); else new_csr6 = 0x82420000; break; } case 1: case 3: { int phy_num = p[0]; int init_length = p[1]; u16 *misc_info, tmp_info; dev->if_port = 11; new_csr6 = 0x020E0000; if (mleaf->type == 3) { /* 21142 */ u16 *init_sequence = (u16*)(p+2); u16 *reset_sequence = &((u16*)(p+3))[init_length]; int reset_length = p[2 + init_length*2]; misc_info = reset_sequence + reset_length; if (startup) { int timeout = 10; /* max 1 ms */ for (i = 0; i < reset_length; i++) iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15); /* flush posted writes */ ioread32(ioaddr + CSR15); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15); ioread32(ioaddr + CSR15); /* flush posted writes */ } else { u8 *init_sequence = p + 2; u8 *reset_sequence = p + 3 + init_length; int reset_length = p[2 + init_length]; misc_info = (u16*)(reset_sequence + 
reset_length); if (startup) { int timeout = 10; /* max 1 ms */ iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); for (i = 0; i < reset_length; i++) iowrite32(reset_sequence[i], ioaddr + CSR12); /* flush posted writes */ ioread32(ioaddr + CSR12); /* Sect 3.10.3 in DP83840A.pdf (p39) */ udelay(500); /* Section 4.2 in DP83840A.pdf (p43) */ /* and IEEE 802.3 "22.2.4.1.1 Reset" */ while (timeout-- && (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) udelay(100); } for (i = 0; i < init_length; i++) iowrite32(init_sequence[i], ioaddr + CSR12); ioread32(ioaddr + CSR12); /* flush posted writes */ } tmp_info = get_u16(&misc_info[1]); if (tmp_info) tp->advertising[phy_num] = tmp_info | 1; if (tmp_info && startup < 2) { if (tp->mii_advertise == 0) tp->mii_advertise = tp->advertising[phy_num]; if (tulip_debug > 1) netdev_dbg(dev, " Advertising %04x on MII %d\n", tp->mii_advertise, tp->phys[phy_num]); tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); } break; } case 5: case 6: { u16 setup[5]; new_csr6 = 0; /* FIXME */ for (i = 0; i < 5; i++) setup[i] = get_u16(&p[i*2 + 1]); if (startup && mtable->has_reset) { struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; unsigned char *rst = rleaf->leafdata; if (tulip_debug > 1) netdev_dbg(dev, "Resetting the transceiver\n"); for (i = 0; i < rst[0]; i++) iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); } break; } default: netdev_dbg(dev, " Invalid media table selection %d\n", mleaf->type); new_csr6 = 0x020E0000; } if (tulip_debug > 1) netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12) & 0xff); } else if (tp->chip_id == LC82C168) { if (startup && ! tp->medialock) dev->if_port = tp->mii_cnt ? 
11 : 0; if (tulip_debug > 1) netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n", ioread32(ioaddr + 0xB8), medianame[dev->if_port]); if (tp->mii_cnt) { new_csr6 = 0x810C0000; iowrite32(0x0001, ioaddr + CSR15); iowrite32(0x0201B07A, ioaddr + 0xB8); } else if (startup) { /* Start with 10mbps to do autonegotiation. */ iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x0001B078, ioaddr + 0xB8); iowrite32(0x0201B078, ioaddr + 0xB8); } else if (dev->if_port == 3 || dev->if_port == 5) { iowrite32(0x33, ioaddr + CSR12); new_csr6 = 0x01860000; /* Trigger autonegotiation. */ iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8); } else { iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; iowrite32(0x1F078, ioaddr + 0xB8); } } else { /* Unknown chip type with no media table. */ if (tp->default_port == 0) dev->if_port = tp->mii_cnt ? 11 : 3; if (tulip_media_cap[dev->if_port] & MediaIsMII) { new_csr6 = 0x020E0000; } else if (tulip_media_cap[dev->if_port] & MediaIsFx) { new_csr6 = 0x02860000; } else new_csr6 = 0x03860000; if (tulip_debug > 1) netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n", medianame[dev->if_port], ioread32(ioaddr + CSR12)); } tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); mdelay(1); } /* Check the MII negotiated duplex and change the CSR6 setting if required. Return 0 if everything is OK. Return < 0 if the transceiver is missing or has no link beat. 
*/ int tulip_check_duplex(struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); unsigned int bmsr, lpa, negotiated, new_csr6; bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA); if (tulip_debug > 1) dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n", bmsr, lpa); if (bmsr == 0xffff) return -2; if ((bmsr & BMSR_LSTATUS) == 0) { int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); if ((new_bmsr & BMSR_LSTATUS) == 0) { if (tulip_debug > 1) dev_info(&dev->dev, "No link beat on the MII interface, status %04x\n", new_bmsr); return -1; } } negotiated = lpa & tp->advertising[0]; tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated); new_csr6 = tp->csr6; if (negotiated & LPA_100) new_csr6 &= ~TxThreshold; else new_csr6 |= TxThreshold; if (tp->full_duplex) new_csr6 |= FullDuplex; else new_csr6 &= ~FullDuplex; if (new_csr6 != tp->csr6) { tp->csr6 = new_csr6; tulip_restart_rxtx(tp); if (tulip_debug > 0) dev_info(&dev->dev, "Setting %s-duplex based on MII#%d link partner capability of %04x\n", tp->full_duplex ? "full" : "half", tp->phys[0], lpa); return 1; } return 0; } void tulip_find_mii(struct net_device *dev, int board_idx) { struct tulip_private *tp = netdev_priv(dev); int phyn, phy_idx = 0; int mii_reg0; int mii_advert; unsigned int to_advert, new_bmcr, ane_switch; /* Find the connected MII xcvrs. Doing this in open() would allow detecting external xcvrs later, but takes much time. 
*/ for (phyn = 1; phyn <= 32 && phy_idx < ARRAY_SIZE(tp->phys); phyn++) { int phy = phyn & 0x1f; int mii_status = tulip_mdio_read (dev, phy, MII_BMSR); if ((mii_status & 0x8301) == 0x8001 || ((mii_status & BMSR_100BASE4) == 0 && (mii_status & 0x7800) != 0)) { /* preserve Becker logic, gain indentation level */ } else { continue; } mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR); mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE); ane_switch = 0; /* if not advertising at all, gen an * advertising value from the capability * bits in BMSR */ if ((mii_advert & ADVERTISE_ALL) == 0) { unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR); mii_advert = ((tmpadv >> 6) & 0x3e0) | 1; } if (tp->mii_advertise) { tp->advertising[phy_idx] = to_advert = tp->mii_advertise; } else if (tp->advertising[phy_idx]) { to_advert = tp->advertising[phy_idx]; } else { tp->advertising[phy_idx] = tp->mii_advertise = to_advert = mii_advert; } tp->phys[phy_idx++] = phy; pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n", board_idx, phy, mii_reg0, mii_status, mii_advert); /* Fixup for DLink with miswired PHY. */ if (mii_advert != to_advert) { pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n", board_idx, to_advert, phy, mii_advert); tulip_mdio_write (dev, phy, 4, to_advert); } /* Enable autonegotiation: some boards default to off. 
*/ if (tp->default_port == 0) { new_bmcr = mii_reg0 | BMCR_ANENABLE; if (new_bmcr != mii_reg0) { new_bmcr |= BMCR_ANRESTART; ane_switch = 1; } } /* ...or disable nway, if forcing media */ else { new_bmcr = mii_reg0 & ~BMCR_ANENABLE; if (new_bmcr != mii_reg0) ane_switch = 1; } /* clear out bits we never want at this point */ new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE | BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK | BMCR_RESET); if (tp->full_duplex) new_bmcr |= BMCR_FULLDPLX; if (tulip_media_cap[tp->default_port] & MediaIs100) new_bmcr |= BMCR_SPEED100; if (new_bmcr != mii_reg0) { /* some phys need the ANE switch to * happen before forced media settings * will "take." However, we write the * same value twice in order not to * confuse the sane phys. */ if (ane_switch) { tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); udelay (10); } tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); } } tp->mii_cnt = phy_idx; if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n", board_idx); tp->phys[0] = 1; } }
gpl-2.0
SOKP/kernel_cyanogen_msm8916
kernel/time/timer_list.c
2985
9231
/* * kernel/time/timer_list.c * * List pending timers * * Copyright(C) 2006, Red Hat, Inc., Ingo Molnar * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <linux/tick.h> #include <asm/uaccess.h> struct timer_list_iter { int cpu; bool second_pass; u64 now; }; typedef void (*print_fn_t)(struct seq_file *m, unsigned int *classes); DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases); /* * This allows printing both to /proc/timer_list and * to the console (on SysRq-Q): */ #define SEQ_printf(m, x...) \ do { \ if (m) \ seq_printf(m, x); \ else \ printk(x); \ } while (0) static void print_name_offset(struct seq_file *m, void *sym) { char symname[KSYM_NAME_LEN]; if (lookup_symbol_name((unsigned long)sym, symname) < 0) SEQ_printf(m, "<%pK>", sym); else SEQ_printf(m, "%s", symname); } static void print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer, int idx, u64 now) { #ifdef CONFIG_TIMER_STATS char tmp[TASK_COMM_LEN + 1]; #endif SEQ_printf(m, " #%d: ", idx); print_name_offset(m, taddr); SEQ_printf(m, ", "); print_name_offset(m, timer->function); SEQ_printf(m, ", S:%02lx", timer->state); #ifdef CONFIG_TIMER_STATS SEQ_printf(m, ", "); print_name_offset(m, timer->start_site); memcpy(tmp, timer->start_comm, TASK_COMM_LEN); tmp[TASK_COMM_LEN] = 0; SEQ_printf(m, ", %s/%d", tmp, timer->start_pid); #endif SEQ_printf(m, "\n"); SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n", (unsigned long long)ktime_to_ns(hrtimer_get_softexpires(timer)), (unsigned long long)ktime_to_ns(hrtimer_get_expires(timer)), (long long)(ktime_to_ns(hrtimer_get_softexpires(timer)) - now), (long long)(ktime_to_ns(hrtimer_get_expires(timer)) - now)); } static 
void print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) { struct hrtimer *timer, tmp; unsigned long next = 0, i; struct timerqueue_node *curr; unsigned long flags; next_one: i = 0; raw_spin_lock_irqsave(&base->cpu_base->lock, flags); curr = timerqueue_getnext(&base->active); /* * Crude but we have to do this O(N*N) thing, because * we have to unlock the base when printing: */ while (curr && i < next) { curr = timerqueue_iterate_next(curr); i++; } if (curr) { timer = container_of(curr, struct hrtimer, node); tmp = *timer; raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); print_timer(m, timer, &tmp, i, now); next++; goto next_one; } raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); } static void print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) { SEQ_printf(m, " .base: %pK\n", base); SEQ_printf(m, " .index: %d\n", base->index); SEQ_printf(m, " .resolution: %Lu nsecs\n", (unsigned long long)ktime_to_ns(base->resolution)); SEQ_printf(m, " .get_time: "); print_name_offset(m, base->get_time); SEQ_printf(m, "\n"); #ifdef CONFIG_HIGH_RES_TIMERS SEQ_printf(m, " .offset: %Lu nsecs\n", (unsigned long long) ktime_to_ns(base->offset)); #endif SEQ_printf(m, "active timers:\n"); print_active_timers(m, base, now); } static void print_cpu(struct seq_file *m, int cpu, u64 now) { struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; SEQ_printf(m, "cpu: %d\n", cpu); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { SEQ_printf(m, " clock %d:\n", i); print_base(m, cpu_base->clock_base + i, now); } #define P(x) \ SEQ_printf(m, " .%-15s: %Lu\n", #x, \ (unsigned long long)(cpu_base->x)) #define P_ns(x) \ SEQ_printf(m, " .%-15s: %Lu nsecs\n", #x, \ (unsigned long long)(ktime_to_ns(cpu_base->x))) #ifdef CONFIG_HIGH_RES_TIMERS P_ns(expires_next); P(hres_active); P(nr_events); P(nr_retries); P(nr_hangs); P_ns(max_hang_time); #endif #undef P #undef P_ns #ifdef CONFIG_TICK_ONESHOT # define P(x) \ SEQ_printf(m, 
" .%-15s: %Lu\n", #x, \ (unsigned long long)(ts->x)) # define P_ns(x) \ SEQ_printf(m, " .%-15s: %Lu nsecs\n", #x, \ (unsigned long long)(ktime_to_ns(ts->x))) { struct tick_sched *ts = tick_get_tick_sched(cpu); P(nohz_mode); P_ns(last_tick); P(tick_stopped); P(idle_jiffies); P(idle_calls); P(idle_sleeps); P_ns(idle_entrytime); P_ns(idle_waketime); P_ns(idle_exittime); P_ns(idle_sleeptime); P_ns(iowait_sleeptime); P(last_jiffies); P(next_jiffies); P_ns(idle_expires); SEQ_printf(m, "jiffies: %Lu\n", (unsigned long long)jiffies); } #endif #undef P #undef P_ns SEQ_printf(m, "\n"); } #ifdef CONFIG_GENERIC_CLOCKEVENTS static void print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) { struct clock_event_device *dev = td->evtdev; SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); if (cpu < 0) SEQ_printf(m, "Broadcast device\n"); else SEQ_printf(m, "Per CPU device: %d\n", cpu); SEQ_printf(m, "Clock Event Device: "); if (!dev) { SEQ_printf(m, "<NULL>\n"); return; } SEQ_printf(m, "%s\n", dev->name); SEQ_printf(m, " max_delta_ns: %llu\n", (unsigned long long) dev->max_delta_ns); SEQ_printf(m, " min_delta_ns: %llu\n", (unsigned long long) dev->min_delta_ns); SEQ_printf(m, " mult: %u\n", dev->mult); SEQ_printf(m, " shift: %u\n", dev->shift); SEQ_printf(m, " mode: %d\n", dev->mode); SEQ_printf(m, " next_event: %Ld nsecs\n", (unsigned long long) ktime_to_ns(dev->next_event)); SEQ_printf(m, " set_next_event: "); print_name_offset(m, dev->set_next_event); SEQ_printf(m, "\n"); SEQ_printf(m, " set_mode: "); print_name_offset(m, dev->set_mode); SEQ_printf(m, "\n"); SEQ_printf(m, " event_handler: "); print_name_offset(m, dev->event_handler); SEQ_printf(m, "\n"); SEQ_printf(m, " retries: %lu\n", dev->retries); SEQ_printf(m, "\n"); } static void timer_list_show_tickdevices_header(struct seq_file *m) { #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST print_tickdevice(m, tick_get_broadcast_device(), -1); SEQ_printf(m, "tick_broadcast_mask: %08lx\n", 
cpumask_bits(tick_get_broadcast_mask())[0]); #ifdef CONFIG_TICK_ONESHOT SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n", cpumask_bits(tick_get_broadcast_oneshot_mask())[0]); #endif SEQ_printf(m, "\n"); #endif } #endif static inline void timer_list_header(struct seq_file *m, u64 now) { SEQ_printf(m, "Timer List Version: v0.7\n"); SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); SEQ_printf(m, "\n"); } static int timer_list_show(struct seq_file *m, void *v) { struct timer_list_iter *iter = v; if (iter->cpu == -1 && !iter->second_pass) timer_list_header(m, iter->now); else if (!iter->second_pass) print_cpu(m, iter->cpu, iter->now); #ifdef CONFIG_GENERIC_CLOCKEVENTS else if (iter->cpu == -1 && iter->second_pass) timer_list_show_tickdevices_header(m); else print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu); #endif return 0; } void sysrq_timer_list_show(void) { u64 now = ktime_to_ns(ktime_get()); int cpu; timer_list_header(NULL, now); for_each_online_cpu(cpu) print_cpu(NULL, cpu, now); #ifdef CONFIG_GENERIC_CLOCKEVENTS timer_list_show_tickdevices_header(NULL); for_each_online_cpu(cpu) print_tickdevice(NULL, tick_get_device(cpu), cpu); #endif return; } static void *move_iter(struct timer_list_iter *iter, loff_t offset) { for (; offset; offset--) { iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); if (iter->cpu >= nr_cpu_ids) { #ifdef CONFIG_GENERIC_CLOCKEVENTS if (!iter->second_pass) { iter->cpu = -1; iter->second_pass = true; } else return NULL; #else return NULL; #endif } } return iter; } static void *timer_list_start(struct seq_file *file, loff_t *offset) { struct timer_list_iter *iter = file->private; if (!*offset) iter->now = ktime_to_ns(ktime_get()); iter->cpu = -1; iter->second_pass = false; return move_iter(iter, *offset); } static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset) { struct timer_list_iter *iter = file->private; ++*offset; 
return move_iter(iter, 1); } static void timer_list_stop(struct seq_file *seq, void *v) { } static const struct seq_operations timer_list_sops = { .start = timer_list_start, .next = timer_list_next, .stop = timer_list_stop, .show = timer_list_show, }; static int timer_list_open(struct inode *inode, struct file *filp) { return seq_open_private(filp, &timer_list_sops, sizeof(struct timer_list_iter)); } static const struct file_operations timer_list_fops = { .open = timer_list_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static int __init init_timer_list_procfs(void) { struct proc_dir_entry *pe; pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); if (!pe) return -ENOMEM; return 0; } __initcall(init_timer_list_procfs);
gpl-2.0
AOKPSaber/kernel_samsung_p4
arch/arm/mach-ux500/id.c
2985
2229
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/cputype.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/setup.h> struct dbx500_asic_id dbx500_id; static unsigned int ux500_read_asicid(phys_addr_t addr) { phys_addr_t base = addr & ~0xfff; struct map_desc desc = { .virtual = IO_ADDRESS(base), .pfn = __phys_to_pfn(base), .length = SZ_16K, .type = MT_DEVICE, }; iotable_init(&desc, 1); /* As in devicemaps_init() */ local_flush_tlb_all(); flush_cache_all(); return readl(__io_address(addr)); } static void ux500_print_soc_info(unsigned int asicid) { unsigned int rev = dbx500_revision(); pr_info("DB%4x ", dbx500_partnumber()); if (rev == 0x01) pr_cont("Early Drop"); else if (rev >= 0xA0) pr_cont("v%d.%d" , (rev >> 4) - 0xA + 1, rev & 0xf); else pr_cont("Unknown"); pr_cont(" [%#010x]\n", asicid); } static unsigned int partnumber(unsigned int asicid) { return (asicid >> 8) & 0xffff; } /* * SOC MIDR ASICID ADDRESS ASICID VALUE * DB8500ed 0x410fc090 0x9001FFF4 0x00850001 * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0 * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1 * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0 * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0 */ void __init ux500_map_io(void) { unsigned int cpuid = read_cpuid_id(); unsigned int asicid = 0; phys_addr_t addr = 0; switch (cpuid) { case 0x410fc090: /* DB8500ed */ case 0x411fc091: /* DB8500v1 */ addr = 0x9001FFF4; break; case 0x412fc091: /* DB8500v2 / DB5500v1 */ asicid = ux500_read_asicid(0x9001DBF4); if (partnumber(asicid) == 0x8500) /* DB8500v2 */ break; /* DB5500v1 */ addr = 0x9001FFF4; break; } if (addr) asicid = ux500_read_asicid(addr); if (!asicid) { pr_err("Unable to identify SoC\n"); ux500_unknown_soc(); } dbx500_id.process = asicid >> 
24; dbx500_id.partnumber = partnumber(asicid); dbx500_id.revision = asicid & 0xff; ux500_print_soc_info(asicid); }
gpl-2.0
Hellybean/android_kernel_amazon_otter-common
net/core/netevent.c
3241
2077
/* * Network event notifiers * * Authors: * Tom Tucker <tom@opengridcomputing.com> * Steve Wise <swise@opengridcomputing.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: */ #include <linux/rtnetlink.h> #include <linux/notifier.h> #include <net/netevent.h> static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); /** * register_netevent_notifier - register a netevent notifier block * @nb: notifier * * Register a notifier to be called when a netevent occurs. * The notifier passed is linked into the kernel structures and must * not be reused until it has been unregistered. A negative errno code * is returned on a failure. */ int register_netevent_notifier(struct notifier_block *nb) { int err; err = atomic_notifier_chain_register(&netevent_notif_chain, nb); return err; } EXPORT_SYMBOL_GPL(register_netevent_notifier); /** * netevent_unregister_notifier - unregister a netevent notifier block * @nb: notifier * * Unregister a notifier previously registered by * register_neigh_notifier(). The notifier is unlinked into the * kernel structures and may then be reused. A negative errno code * is returned on a failure. */ int unregister_netevent_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&netevent_notif_chain, nb); } EXPORT_SYMBOL_GPL(unregister_netevent_notifier); /** * call_netevent_notifiers - call all netevent notifier blocks * @val: value passed unmodified to notifier function * @v: pointer passed unmodified to notifier function * * Call all neighbour notifier blocks. Parameters and return value * are as for notifier_call_chain(). */ int call_netevent_notifiers(unsigned long val, void *v) { return atomic_notifier_call_chain(&netevent_notif_chain, val, v); } EXPORT_SYMBOL_GPL(call_netevent_notifiers);
gpl-2.0
tdm/kernel_huawei_msm8928
fs/nfs/nfs4filelayoutdev.c
4777
20978
/* * Device operations for the pnfs nfs4 file layout driver. * * Copyright (c) 2002 * The Regents of the University of Michigan * All Rights Reserved * * Dean Hildebrand <dhildebz@umich.edu> * Garth Goodson <Garth.Goodson@netapp.com> * * Permission is granted to use, copy, create derivative works, and * redistribute this software and such derivative works for any purpose, * so long as the name of the University of Michigan is not used in * any advertising or publicity pertaining to the use or distribution * of this software without specific, written prior authorization. If * the above copyright notice or any other identification of the * University of Michigan is included in any copy of any portion of * this software, then the disclaimer below must also be included. * * This software is provided as is, without representation or warranty * of any kind either express or implied, including without limitation * the implied warranties of merchantability, fitness for a particular * purpose, or noninfringement. The Regents of the University of * Michigan shall not be liable for any damages, including special, * indirect, incidental, or consequential damages, with respect to any * claim arising out of or in connection with the use of the software, * even if it has been or is hereafter advised of the possibility of * such damages. */ #include <linux/nfs_fs.h> #include <linux/vmalloc.h> #include "internal.h" #include "nfs4filelayout.h" #define NFSDBG_FACILITY NFSDBG_PNFS_LD /* * Data server cache * * Data servers can be mapped to different device ids. * nfs4_pnfs_ds reference counting * - set to 1 on allocation * - incremented when a device id maps a data server already in the cache. * - decremented when deviceid is removed from the cache. 
*/ static DEFINE_SPINLOCK(nfs4_ds_cache_lock); static LIST_HEAD(nfs4_data_server_cache); /* Debug routines */ void print_ds(struct nfs4_pnfs_ds *ds) { if (ds == NULL) { printk("%s NULL device\n", __func__); return; } printk(" ds %s\n" " ref count %d\n" " client %p\n" " cl_exchange_flags %x\n", ds->ds_remotestr, atomic_read(&ds->ds_count), ds->ds_clp, ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0); } static bool same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) { struct sockaddr_in *a, *b; struct sockaddr_in6 *a6, *b6; if (addr1->sa_family != addr2->sa_family) return false; switch (addr1->sa_family) { case AF_INET: a = (struct sockaddr_in *)addr1; b = (struct sockaddr_in *)addr2; if (a->sin_addr.s_addr == b->sin_addr.s_addr && a->sin_port == b->sin_port) return true; break; case AF_INET6: a6 = (struct sockaddr_in6 *)addr1; b6 = (struct sockaddr_in6 *)addr2; /* LINKLOCAL addresses must have matching scope_id */ if (ipv6_addr_scope(&a6->sin6_addr) == IPV6_ADDR_SCOPE_LINKLOCAL && a6->sin6_scope_id != b6->sin6_scope_id) return false; if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) && a6->sin6_port == b6->sin6_port) return true; break; default: dprintk("%s: unhandled address family: %u\n", __func__, addr1->sa_family); return false; } return false; } static bool _same_data_server_addrs_locked(const struct list_head *dsaddrs1, const struct list_head *dsaddrs2) { struct nfs4_pnfs_ds_addr *da1, *da2; /* step through both lists, comparing as we go */ for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); da1 != NULL && da2 != NULL; da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { if (!same_sockaddr((struct sockaddr *)&da1->da_addr, (struct sockaddr *)&da2->da_addr)) return false; } if (da1 == NULL && da2 == NULL) return true; return false; } /* * Lookup DS by addresses. 
nfs4_ds_cache_lock is held */ static struct nfs4_pnfs_ds * _data_server_lookup_locked(const struct list_head *dsaddrs) { struct nfs4_pnfs_ds *ds; list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) return ds; return NULL; } /* * Create an rpc connection to the nfs4_pnfs_ds data server * Currently only supports IPv4 and IPv6 addresses */ static int nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds) { struct nfs_client *clp = ERR_PTR(-EIO); struct nfs4_pnfs_ds_addr *da; int status = 0; dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr, mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor); BUG_ON(list_empty(&ds->ds_addrs)); list_for_each_entry(da, &ds->ds_addrs, da_node) { dprintk("%s: DS %s: trying address %s\n", __func__, ds->ds_remotestr, da->da_remotestr); clp = nfs4_set_ds_client(mds_srv->nfs_client, (struct sockaddr *)&da->da_addr, da->da_addrlen, IPPROTO_TCP); if (!IS_ERR(clp)) break; } if (IS_ERR(clp)) { status = PTR_ERR(clp); goto out; } if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) { if (!is_ds_client(clp)) { status = -ENODEV; goto out_put; } ds->ds_clp = clp; dprintk("%s [existing] server=%s\n", __func__, ds->ds_remotestr); goto out; } /* * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to * be equal to the MDS lease. Renewal is scheduled in create_session. 
*/ spin_lock(&mds_srv->nfs_client->cl_lock); clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time; spin_unlock(&mds_srv->nfs_client->cl_lock); clp->cl_last_renewal = jiffies; /* New nfs_client */ status = nfs4_init_ds_session(clp); if (status) goto out_put; ds->ds_clp = clp; dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); out: return status; out_put: nfs_put_client(clp); goto out; } static void destroy_ds(struct nfs4_pnfs_ds *ds) { struct nfs4_pnfs_ds_addr *da; dprintk("--> %s\n", __func__); ifdebug(FACILITY) print_ds(ds); if (ds->ds_clp) nfs_put_client(ds->ds_clp); while (!list_empty(&ds->ds_addrs)) { da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node); list_del_init(&da->da_node); kfree(da->da_remotestr); kfree(da); } kfree(ds->ds_remotestr); kfree(ds); } void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) { struct nfs4_pnfs_ds *ds; int i; nfs4_print_deviceid(&dsaddr->id_node.deviceid); for (i = 0; i < dsaddr->ds_num; i++) { ds = dsaddr->ds_list[i]; if (ds != NULL) { if (atomic_dec_and_lock(&ds->ds_count, &nfs4_ds_cache_lock)) { list_del_init(&ds->ds_node); spin_unlock(&nfs4_ds_cache_lock); destroy_ds(ds); } } } kfree(dsaddr->stripe_indices); kfree(dsaddr); } /* * Create a string with a human readable address and port to avoid * complicated setup around many dprinks. 
*/ static char * nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags) { struct nfs4_pnfs_ds_addr *da; char *remotestr; size_t len; char *p; len = 3; /* '{', '}' and eol */ list_for_each_entry(da, dsaddrs, da_node) { len += strlen(da->da_remotestr) + 1; /* string plus comma */ } remotestr = kzalloc(len, gfp_flags); if (!remotestr) return NULL; p = remotestr; *(p++) = '{'; len--; list_for_each_entry(da, dsaddrs, da_node) { size_t ll = strlen(da->da_remotestr); if (ll > len) goto out_err; memcpy(p, da->da_remotestr, ll); p += ll; len -= ll; if (len < 1) goto out_err; (*p++) = ','; len--; } if (len < 2) goto out_err; *(p++) = '}'; *p = '\0'; return remotestr; out_err: kfree(remotestr); return NULL; } static struct nfs4_pnfs_ds * nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) { struct nfs4_pnfs_ds *tmp_ds, *ds = NULL; char *remotestr; if (list_empty(dsaddrs)) { dprintk("%s: no addresses defined\n", __func__); goto out; } ds = kzalloc(sizeof(*ds), gfp_flags); if (!ds) goto out; /* this is only used for debugging, so it's ok if its NULL */ remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags); spin_lock(&nfs4_ds_cache_lock); tmp_ds = _data_server_lookup_locked(dsaddrs); if (tmp_ds == NULL) { INIT_LIST_HEAD(&ds->ds_addrs); list_splice_init(dsaddrs, &ds->ds_addrs); ds->ds_remotestr = remotestr; atomic_set(&ds->ds_count, 1); INIT_LIST_HEAD(&ds->ds_node); ds->ds_clp = NULL; list_add(&ds->ds_node, &nfs4_data_server_cache); dprintk("%s add new data server %s\n", __func__, ds->ds_remotestr); } else { kfree(remotestr); kfree(ds); atomic_inc(&tmp_ds->ds_count); dprintk("%s data server %s found, inc'ed ds_count to %d\n", __func__, tmp_ds->ds_remotestr, atomic_read(&tmp_ds->ds_count)); ds = tmp_ds; } spin_unlock(&nfs4_ds_cache_lock); out: return ds; } /* * Currently only supports ipv4, ipv6 and one multi-path address. 
*/ static struct nfs4_pnfs_ds_addr * decode_ds_addr(struct net *net, struct xdr_stream *streamp, gfp_t gfp_flags) { struct nfs4_pnfs_ds_addr *da = NULL; char *buf, *portstr; __be16 port; int nlen, rlen; int tmp[2]; __be32 *p; char *netid, *match_netid; size_t len, match_netid_len; char *startsep = ""; char *endsep = ""; /* r_netid */ p = xdr_inline_decode(streamp, 4); if (unlikely(!p)) goto out_err; nlen = be32_to_cpup(p++); p = xdr_inline_decode(streamp, nlen); if (unlikely(!p)) goto out_err; netid = kmalloc(nlen+1, gfp_flags); if (unlikely(!netid)) goto out_err; netid[nlen] = '\0'; memcpy(netid, p, nlen); /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */ p = xdr_inline_decode(streamp, 4); if (unlikely(!p)) goto out_free_netid; rlen = be32_to_cpup(p); p = xdr_inline_decode(streamp, rlen); if (unlikely(!p)) goto out_free_netid; /* port is ".ABC.DEF", 8 chars max */ if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) { dprintk("%s: Invalid address, length %d\n", __func__, rlen); goto out_free_netid; } buf = kmalloc(rlen + 1, gfp_flags); if (!buf) { dprintk("%s: Not enough memory\n", __func__); goto out_free_netid; } buf[rlen] = '\0'; memcpy(buf, p, rlen); /* replace port '.' with '-' */ portstr = strrchr(buf, '.'); if (!portstr) { dprintk("%s: Failed finding expected dot in port\n", __func__); goto out_free_buf; } *portstr = '-'; /* find '.' 
between address and port */ portstr = strrchr(buf, '.'); if (!portstr) { dprintk("%s: Failed finding expected dot between address and " "port\n", __func__); goto out_free_buf; } *portstr = '\0'; da = kzalloc(sizeof(*da), gfp_flags); if (unlikely(!da)) goto out_free_buf; INIT_LIST_HEAD(&da->da_node); if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr, sizeof(da->da_addr))) { dprintk("%s: error parsing address %s\n", __func__, buf); goto out_free_da; } portstr++; sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]); port = htons((tmp[0] << 8) | (tmp[1])); switch (da->da_addr.ss_family) { case AF_INET: ((struct sockaddr_in *)&da->da_addr)->sin_port = port; da->da_addrlen = sizeof(struct sockaddr_in); match_netid = "tcp"; match_netid_len = 3; break; case AF_INET6: ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port; da->da_addrlen = sizeof(struct sockaddr_in6); match_netid = "tcp6"; match_netid_len = 4; startsep = "["; endsep = "]"; break; default: dprintk("%s: unsupported address family: %u\n", __func__, da->da_addr.ss_family); goto out_free_da; } if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) { dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n", __func__, netid, match_netid); goto out_free_da; } /* save human readable address */ len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7; da->da_remotestr = kzalloc(len, gfp_flags); /* NULL is ok, only used for dprintk */ if (da->da_remotestr) snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep, buf, endsep, ntohs(port)); dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr); kfree(buf); kfree(netid); return da; out_free_da: kfree(da); out_free_buf: dprintk("%s: Error parsing DS addr: %s\n", __func__, buf); kfree(buf); out_free_netid: kfree(netid); out_err: return NULL; } /* Decode opaque device data and return the result */ static struct nfs4_file_layout_dsaddr* decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags) { int i; u32 cnt, num; u8 *indexp; __be32 
*p; u8 *stripe_indices; u8 max_stripe_index; struct nfs4_file_layout_dsaddr *dsaddr = NULL; struct xdr_stream stream; struct xdr_buf buf; struct page *scratch; struct list_head dsaddrs; struct nfs4_pnfs_ds_addr *da; /* set up xdr stream */ scratch = alloc_page(gfp_flags); if (!scratch) goto out_err; xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen); xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); /* Get the stripe count (number of stripe index) */ p = xdr_inline_decode(&stream, 4); if (unlikely(!p)) goto out_err_free_scratch; cnt = be32_to_cpup(p); dprintk("%s stripe count %d\n", __func__, cnt); if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) { printk(KERN_WARNING "NFS: %s: stripe count %d greater than " "supported maximum %d\n", __func__, cnt, NFS4_PNFS_MAX_STRIPE_CNT); goto out_err_free_scratch; } /* read stripe indices */ stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags); if (!stripe_indices) goto out_err_free_scratch; p = xdr_inline_decode(&stream, cnt << 2); if (unlikely(!p)) goto out_err_free_stripe_indices; indexp = &stripe_indices[0]; max_stripe_index = 0; for (i = 0; i < cnt; i++) { *indexp = be32_to_cpup(p++); max_stripe_index = max(max_stripe_index, *indexp); indexp++; } /* Check the multipath list count */ p = xdr_inline_decode(&stream, 4); if (unlikely(!p)) goto out_err_free_stripe_indices; num = be32_to_cpup(p); dprintk("%s ds_num %u\n", __func__, num); if (num > NFS4_PNFS_MAX_MULTI_CNT) { printk(KERN_WARNING "NFS: %s: multipath count %d greater than " "supported maximum %d\n", __func__, num, NFS4_PNFS_MAX_MULTI_CNT); goto out_err_free_stripe_indices; } /* validate stripe indices are all < num */ if (max_stripe_index >= num) { printk(KERN_WARNING "NFS: %s: stripe index %u >= num ds %u\n", __func__, max_stripe_index, num); goto out_err_free_stripe_indices; } dsaddr = kzalloc(sizeof(*dsaddr) + (sizeof(struct nfs4_pnfs_ds *) * (num - 1)), gfp_flags); if (!dsaddr) goto out_err_free_stripe_indices; dsaddr->stripe_count = cnt; 
dsaddr->stripe_indices = stripe_indices; stripe_indices = NULL; dsaddr->ds_num = num; nfs4_init_deviceid_node(&dsaddr->id_node, NFS_SERVER(ino)->pnfs_curr_ld, NFS_SERVER(ino)->nfs_client, &pdev->dev_id); INIT_LIST_HEAD(&dsaddrs); for (i = 0; i < dsaddr->ds_num; i++) { int j; u32 mp_count; p = xdr_inline_decode(&stream, 4); if (unlikely(!p)) goto out_err_free_deviceid; mp_count = be32_to_cpup(p); /* multipath count */ for (j = 0; j < mp_count; j++) { da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net, &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } if (list_empty(&dsaddrs)) { dprintk("%s: no suitable DS addresses found\n", __func__); goto out_err_free_deviceid; } dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); if (!dsaddr->ds_list[i]) goto out_err_drain_dsaddrs; /* If DS was already in cache, free ds addrs */ while (!list_empty(&dsaddrs)) { da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr, da_node); list_del_init(&da->da_node); kfree(da->da_remotestr); kfree(da); } } __free_page(scratch); return dsaddr; out_err_drain_dsaddrs: while (!list_empty(&dsaddrs)) { da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr, da_node); list_del_init(&da->da_node); kfree(da->da_remotestr); kfree(da); } out_err_free_deviceid: nfs4_fl_free_deviceid(dsaddr); /* stripe_indicies was part of dsaddr */ goto out_err_free_scratch; out_err_free_stripe_indices: kfree(stripe_indices); out_err_free_scratch: __free_page(scratch); out_err: dprintk("%s ERROR: returning NULL\n", __func__); return NULL; } /* * Decode the opaque device specified in 'dev' and add it to the cache of * available devices. 
*/ static struct nfs4_file_layout_dsaddr * decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags) { struct nfs4_deviceid_node *d; struct nfs4_file_layout_dsaddr *n, *new; new = decode_device(inode, dev, gfp_flags); if (!new) { printk(KERN_WARNING "NFS: %s: Could not decode or add device\n", __func__); return NULL; } d = nfs4_insert_deviceid_node(&new->id_node); n = container_of(d, struct nfs4_file_layout_dsaddr, id_node); if (n != new) { nfs4_fl_free_deviceid(new); return n; } return new; } /* * Retrieve the information for dev_id, add it to the list * of available devices, and return it. */ struct nfs4_file_layout_dsaddr * get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags) { struct pnfs_device *pdev = NULL; u32 max_resp_sz; int max_pages; struct page **pages = NULL; struct nfs4_file_layout_dsaddr *dsaddr = NULL; int rc, i; struct nfs_server *server = NFS_SERVER(inode); /* * Use the session max response size as the basis for setting * GETDEVICEINFO's maxcount */ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; max_pages = nfs_page_array_len(0, max_resp_sz); dprintk("%s inode %p max_resp_sz %u max_pages %d\n", __func__, inode, max_resp_sz, max_pages); pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags); if (pdev == NULL) return NULL; pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags); if (pages == NULL) { kfree(pdev); return NULL; } for (i = 0; i < max_pages; i++) { pages[i] = alloc_page(gfp_flags); if (!pages[i]) goto out_free; } memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id)); pdev->layout_type = LAYOUT_NFSV4_1_FILES; pdev->pages = pages; pdev->pgbase = 0; pdev->pglen = PAGE_SIZE * max_pages; pdev->mincount = 0; rc = nfs4_proc_getdeviceinfo(server, pdev); dprintk("%s getdevice info returns %d\n", __func__, rc); if (rc) goto out_free; /* * Found new device, need to decode it and then add it to the * list of known devices for this mountpoint. 
*/ dsaddr = decode_and_add_device(inode, pdev, gfp_flags); out_free: for (i = 0; i < max_pages; i++) __free_page(pages[i]); kfree(pages); kfree(pdev); dprintk("<-- %s dsaddr %p\n", __func__, dsaddr); return dsaddr; } void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) { nfs4_put_deviceid_node(&dsaddr->id_node); } /* * Want res = (offset - layout->pattern_offset)/ layout->stripe_unit * Then: ((res + fsi) % dsaddr->stripe_count) */ u32 nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset) { struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); u64 tmp; tmp = offset - flseg->pattern_offset; do_div(tmp, flseg->stripe_unit); tmp += flseg->first_stripe_index; return do_div(tmp, flseg->dsaddr->stripe_count); } u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j) { return FILELAYOUT_LSEG(lseg)->dsaddr->stripe_indices[j]; } struct nfs_fh * nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j) { struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg); u32 i; if (flseg->stripe_type == STRIPE_SPARSE) { if (flseg->num_fh == 1) i = 0; else if (flseg->num_fh == 0) /* Use the MDS OPEN fh set in nfs_read_rpcsetup */ return NULL; else i = nfs4_fl_calc_ds_index(lseg, j); } else i = j; return flseg->fh_array[i]; } static void filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr, int err, const char *ds_remotestr) { u32 *p = (u32 *)&dsaddr->id_node.deviceid; printk(KERN_ERR "NFS: data server %s connection error %d." 
" Deviceid [%x%x%x%x] marked out of use.\n", ds_remotestr, err, p[0], p[1], p[2], p[3]); spin_lock(&nfs4_ds_cache_lock); dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY; spin_unlock(&nfs4_ds_cache_lock); } struct nfs4_pnfs_ds * nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) { struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr; struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; if (ds == NULL) { printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", __func__, ds_idx); return NULL; } if (!ds->ds_clp) { struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); int err; if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) { /* Already tried to connect, don't try again */ dprintk("%s Deviceid marked out of use\n", __func__); return NULL; } err = nfs4_ds_connect(s, ds); if (err) { filelayout_mark_devid_negative(dsaddr, err, ds->ds_remotestr); return NULL; } } return ds; }
gpl-2.0
MoKee/android_kernel_sony_msm8974pro
drivers/hwmon/ntc_thermistor.c
5033
11455
/* * ntc_thermistor.c - NTC Thermistors * * Copyright (C) 2010 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/slab.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/math64.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/platform_data/ntc_thermistor.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> struct ntc_compensation { int temp_C; unsigned int ohm; }; /* * A compensation table should be sorted by the values of .ohm * in descending order. 
* The following compensation tables are from the specification of Murata NTC * Thermistors Datasheet */ const struct ntc_compensation ncpXXwb473[] = { { .temp_C = -40, .ohm = 1747920 }, { .temp_C = -35, .ohm = 1245428 }, { .temp_C = -30, .ohm = 898485 }, { .temp_C = -25, .ohm = 655802 }, { .temp_C = -20, .ohm = 483954 }, { .temp_C = -15, .ohm = 360850 }, { .temp_C = -10, .ohm = 271697 }, { .temp_C = -5, .ohm = 206463 }, { .temp_C = 0, .ohm = 158214 }, { .temp_C = 5, .ohm = 122259 }, { .temp_C = 10, .ohm = 95227 }, { .temp_C = 15, .ohm = 74730 }, { .temp_C = 20, .ohm = 59065 }, { .temp_C = 25, .ohm = 47000 }, { .temp_C = 30, .ohm = 37643 }, { .temp_C = 35, .ohm = 30334 }, { .temp_C = 40, .ohm = 24591 }, { .temp_C = 45, .ohm = 20048 }, { .temp_C = 50, .ohm = 16433 }, { .temp_C = 55, .ohm = 13539 }, { .temp_C = 60, .ohm = 11209 }, { .temp_C = 65, .ohm = 9328 }, { .temp_C = 70, .ohm = 7798 }, { .temp_C = 75, .ohm = 6544 }, { .temp_C = 80, .ohm = 5518 }, { .temp_C = 85, .ohm = 4674 }, { .temp_C = 90, .ohm = 3972 }, { .temp_C = 95, .ohm = 3388 }, { .temp_C = 100, .ohm = 2902 }, { .temp_C = 105, .ohm = 2494 }, { .temp_C = 110, .ohm = 2150 }, { .temp_C = 115, .ohm = 1860 }, { .temp_C = 120, .ohm = 1615 }, { .temp_C = 125, .ohm = 1406 }, }; const struct ntc_compensation ncpXXwl333[] = { { .temp_C = -40, .ohm = 1610154 }, { .temp_C = -35, .ohm = 1130850 }, { .temp_C = -30, .ohm = 802609 }, { .temp_C = -25, .ohm = 575385 }, { .temp_C = -20, .ohm = 416464 }, { .temp_C = -15, .ohm = 304219 }, { .temp_C = -10, .ohm = 224193 }, { .temp_C = -5, .ohm = 166623 }, { .temp_C = 0, .ohm = 124850 }, { .temp_C = 5, .ohm = 94287 }, { .temp_C = 10, .ohm = 71747 }, { .temp_C = 15, .ohm = 54996 }, { .temp_C = 20, .ohm = 42455 }, { .temp_C = 25, .ohm = 33000 }, { .temp_C = 30, .ohm = 25822 }, { .temp_C = 35, .ohm = 20335 }, { .temp_C = 40, .ohm = 16115 }, { .temp_C = 45, .ohm = 12849 }, { .temp_C = 50, .ohm = 10306 }, { .temp_C = 55, .ohm = 8314 }, { .temp_C = 60, .ohm = 6746 }, { .temp_C = 
65, .ohm = 5503 }, { .temp_C = 70, .ohm = 4513 }, { .temp_C = 75, .ohm = 3721 }, { .temp_C = 80, .ohm = 3084 }, { .temp_C = 85, .ohm = 2569 }, { .temp_C = 90, .ohm = 2151 }, { .temp_C = 95, .ohm = 1809 }, { .temp_C = 100, .ohm = 1529 }, { .temp_C = 105, .ohm = 1299 }, { .temp_C = 110, .ohm = 1108 }, { .temp_C = 115, .ohm = 949 }, { .temp_C = 120, .ohm = 817 }, { .temp_C = 125, .ohm = 707 }, }; struct ntc_data { struct device *hwmon_dev; struct ntc_thermistor_platform_data *pdata; const struct ntc_compensation *comp; struct device *dev; int n_comp; char name[PLATFORM_NAME_SIZE]; }; static inline u64 div64_u64_safe(u64 dividend, u64 divisor) { if (divisor == 0 && dividend == 0) return 0; if (divisor == 0) return UINT_MAX; return div64_u64(dividend, divisor); } static unsigned int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uV) { struct ntc_thermistor_platform_data *pdata = data->pdata; u64 mV = uV / 1000; u64 pmV = pdata->pullup_uV / 1000; u64 N, puO, pdO; puO = pdata->pullup_ohm; pdO = pdata->pulldown_ohm; if (mV == 0) { if (pdata->connect == NTC_CONNECTED_POSITIVE) return UINT_MAX; return 0; } if (mV >= pmV) return (pdata->connect == NTC_CONNECTED_POSITIVE) ? 
0 : UINT_MAX; if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0) N = div64_u64_safe(pdO * (pmV - mV), mV); else if (pdata->connect == NTC_CONNECTED_GROUND && pdO == 0) N = div64_u64_safe(puO * mV, pmV - mV); else if (pdata->connect == NTC_CONNECTED_POSITIVE) N = div64_u64_safe(pdO * puO * (pmV - mV), puO * mV - pdO * (pmV - mV)); else N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV); return (unsigned int) N; } static int lookup_comp(struct ntc_data *data, unsigned int ohm, int *i_low, int *i_high) { int start, end, mid = -1; /* Do a binary search on compensation table */ start = 0; end = data->n_comp; while (end > start) { mid = start + (end - start) / 2; if (data->comp[mid].ohm < ohm) end = mid; else if (data->comp[mid].ohm > ohm) start = mid + 1; else break; } if (mid == 0) { if (data->comp[mid].ohm > ohm) { *i_high = mid; *i_low = mid + 1; return 0; } else { *i_low = mid; *i_high = -1; return -EINVAL; } } if (mid == (data->n_comp - 1)) { if (data->comp[mid].ohm <= ohm) { *i_low = mid; *i_high = mid - 1; return 0; } else { *i_low = -1; *i_high = mid; return -EINVAL; } } if (data->comp[mid].ohm <= ohm) { *i_low = mid; *i_high = mid - 1; } else { *i_low = mid + 1; *i_high = mid; } return 0; } static int get_temp_mC(struct ntc_data *data, unsigned int ohm, int *temp) { int low, high; int ret; ret = lookup_comp(data, ohm, &low, &high); if (ret) { /* Unable to use linear approximation */ if (low != -1) *temp = data->comp[low].temp_C * 1000; else if (high != -1) *temp = data->comp[high].temp_C * 1000; else return ret; } else { *temp = data->comp[low].temp_C * 1000 + ((data->comp[high].temp_C - data->comp[low].temp_C) * 1000 * ((int)ohm - (int)data->comp[low].ohm)) / ((int)data->comp[high].ohm - (int)data->comp[low].ohm); } return 0; } static int ntc_thermistor_read(struct ntc_data *data, int *temp) { int ret; int read_ohm, read_uV; unsigned int ohm = 0; if (data->pdata->read_ohm) { read_ohm = data->pdata->read_ohm(); if (read_ohm < 0) return 
read_ohm; ohm = (unsigned int)read_ohm; } if (data->pdata->read_uV) { read_uV = data->pdata->read_uV(); if (read_uV < 0) return read_uV; ohm = get_ohm_of_thermistor(data, (unsigned int)read_uV); } ret = get_temp_mC(data, ohm, temp); if (ret) { dev_dbg(data->dev, "Sensor reading function not available.\n"); return ret; } return 0; } static ssize_t ntc_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct ntc_data *data = dev_get_drvdata(dev); return sprintf(buf, "%s\n", data->name); } static ssize_t ntc_show_type(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "4\n"); } static ssize_t ntc_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct ntc_data *data = dev_get_drvdata(dev); int temp, ret; ret = ntc_thermistor_read(data, &temp); if (ret) return ret; return sprintf(buf, "%d\n", temp); } static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0); static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL); static struct attribute *ntc_attributes[] = { &dev_attr_name.attr, &sensor_dev_attr_temp1_type.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL, }; static const struct attribute_group ntc_attr_group = { .attrs = ntc_attributes, }; static int __devinit ntc_thermistor_probe(struct platform_device *pdev) { struct ntc_data *data; struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data; int ret = 0; if (!pdata) { dev_err(&pdev->dev, "No platform init data supplied.\n"); return -ENODEV; } /* Either one of the two is required. */ if (!pdata->read_uV && !pdata->read_ohm) { dev_err(&pdev->dev, "Both read_uV and read_ohm missing." 
"Need either one of the two.\n"); return -EINVAL; } if (pdata->read_uV && pdata->read_ohm) { dev_warn(&pdev->dev, "Only one of read_uV and read_ohm " "is needed; ignoring read_uV.\n"); pdata->read_uV = NULL; } if (pdata->read_uV && (pdata->pullup_uV == 0 || (pdata->pullup_ohm == 0 && pdata->connect == NTC_CONNECTED_GROUND) || (pdata->pulldown_ohm == 0 && pdata->connect == NTC_CONNECTED_POSITIVE) || (pdata->connect != NTC_CONNECTED_POSITIVE && pdata->connect != NTC_CONNECTED_GROUND))) { dev_err(&pdev->dev, "Required data to use read_uV not " "supplied.\n"); return -EINVAL; } data = kzalloc(sizeof(struct ntc_data), GFP_KERNEL); if (!data) return -ENOMEM; data->dev = &pdev->dev; data->pdata = pdata; strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE); switch (pdev->id_entry->driver_data) { case TYPE_NCPXXWB473: data->comp = ncpXXwb473; data->n_comp = ARRAY_SIZE(ncpXXwb473); break; case TYPE_NCPXXWL333: data->comp = ncpXXwl333; data->n_comp = ARRAY_SIZE(ncpXXwl333); break; default: dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n", pdev->id_entry->driver_data, pdev->id_entry->name); ret = -EINVAL; goto err; } platform_set_drvdata(pdev, data); ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group); if (ret) { dev_err(data->dev, "unable to create sysfs files\n"); goto err; } data->hwmon_dev = hwmon_device_register(data->dev); if (IS_ERR_OR_NULL(data->hwmon_dev)) { dev_err(data->dev, "unable to register as hwmon device.\n"); ret = -EINVAL; goto err_after_sysfs; } dev_info(&pdev->dev, "Thermistor %s:%d (type: %s/%lu) successfully probed.\n", pdev->name, pdev->id, pdev->id_entry->name, pdev->id_entry->driver_data); return 0; err_after_sysfs: sysfs_remove_group(&data->dev->kobj, &ntc_attr_group); err: kfree(data); return ret; } static int __devexit ntc_thermistor_remove(struct platform_device *pdev) { struct ntc_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&data->dev->kobj, &ntc_attr_group); 
platform_set_drvdata(pdev, NULL); kfree(data); return 0; } static const struct platform_device_id ntc_thermistor_id[] = { { "ncp15wb473", TYPE_NCPXXWB473 }, { "ncp18wb473", TYPE_NCPXXWB473 }, { "ncp21wb473", TYPE_NCPXXWB473 }, { "ncp03wb473", TYPE_NCPXXWB473 }, { "ncp15wl333", TYPE_NCPXXWL333 }, { }, }; static struct platform_driver ntc_thermistor_driver = { .driver = { .name = "ntc-thermistor", .owner = THIS_MODULE, }, .probe = ntc_thermistor_probe, .remove = __devexit_p(ntc_thermistor_remove), .id_table = ntc_thermistor_id, }; module_platform_driver(ntc_thermistor_driver); MODULE_DESCRIPTION("NTC Thermistor Driver"); MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ntc-thermistor");
gpl-2.0
Andiry/Linux-xHCI-development
net/ieee802154/af_ieee802154.c
8361
8816
/* * IEEE802154.4 socket interface * * Copyright 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> */ #include <linux/net.h> #include <linux/capability.h> #include <linux/module.h> #include <linux/if_arp.h> #include <linux/if.h> #include <linux/termios.h> /* For TIOCOUTQ/INQ */ #include <linux/list.h> #include <linux/slab.h> #include <net/datalink.h> #include <net/psnap.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/route.h> #include <net/af_ieee802154.h> #include <net/ieee802154_netdev.h> #include "af802154.h" /* * Utility function for families */ struct net_device *ieee802154_get_dev(struct net *net, struct ieee802154_addr *addr) { struct net_device *dev = NULL; struct net_device *tmp; u16 pan_id, short_addr; switch (addr->addr_type) { case IEEE802154_ADDR_LONG: rcu_read_lock(); dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, addr->hwaddr); if (dev) dev_hold(dev); rcu_read_unlock(); break; case IEEE802154_ADDR_SHORT: if (addr->pan_id == 0xffff || addr->short_addr == IEEE802154_ADDR_UNDEF || addr->short_addr == 0xffff) break; rtnl_lock(); for_each_netdev(net, tmp) { if (tmp->type != ARPHRD_IEEE802154) continue; pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp); short_addr = ieee802154_mlme_ops(tmp)->get_short_addr(tmp); if (pan_id == 
addr->pan_id && short_addr == addr->short_addr) { dev = tmp; dev_hold(dev); break; } } rtnl_unlock(); break; default: pr_warning("Unsupported ieee802154 address type: %d\n", addr->addr_type); break; } return dev; } static int ieee802154_sock_release(struct socket *sock) { struct sock *sk = sock->sk; if (sk) { sock->sk = NULL; sk->sk_prot->close(sk, 0); } return 0; } static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; return sk->sk_prot->sendmsg(iocb, sk, msg, len); } static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; if (sk->sk_prot->bind) return sk->sk_prot->bind(sk, uaddr, addr_len); return sock_no_bind(sock, uaddr, addr_len); } static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; if (addr_len < sizeof(uaddr->sa_family)) return -EINVAL; if (uaddr->sa_family == AF_UNSPEC) return sk->sk_prot->disconnect(sk, flags); return sk->sk_prot->connect(sk, uaddr, addr_len); } static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg, unsigned int cmd) { struct ifreq ifr; int ret = -ENOIOCTLCMD; struct net_device *dev; if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; ifr.ifr_name[IFNAMSIZ-1] = 0; dev_load(sock_net(sk), ifr.ifr_name); dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); if (!dev) return -ENODEV; if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) ret = -EFAULT; dev_put(dev); return ret; } static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; switch (cmd) { case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *)arg); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec 
__user *)arg); case SIOCGIFADDR: case SIOCSIFADDR: return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg, cmd); default: if (!sk->sk_prot->ioctl) return -ENOIOCTLCMD; return sk->sk_prot->ioctl(sk, cmd, arg); } } static const struct proto_ops ieee802154_raw_ops = { .family = PF_IEEE802154, .owner = THIS_MODULE, .release = ieee802154_sock_release, .bind = ieee802154_sock_bind, .connect = ieee802154_sock_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = ieee802154_sock_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = ieee802154_sock_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static const struct proto_ops ieee802154_dgram_ops = { .family = PF_IEEE802154, .owner = THIS_MODULE, .release = ieee802154_sock_release, .bind = ieee802154_sock_bind, .connect = ieee802154_sock_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = ieee802154_sock_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = ieee802154_sock_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; /* * Create a socket. Initialise the socket, blank the addresses * set the state. 
*/ static int ieee802154_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; int rc; struct proto *proto; const struct proto_ops *ops; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; switch (sock->type) { case SOCK_RAW: proto = &ieee802154_raw_prot; ops = &ieee802154_raw_ops; break; case SOCK_DGRAM: proto = &ieee802154_dgram_prot; ops = &ieee802154_dgram_ops; break; default: rc = -ESOCKTNOSUPPORT; goto out; } rc = -ENOMEM; sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto); if (!sk) goto out; rc = 0; sock->ops = ops; sock_init_data(sock, sk); /* FIXME: sk->sk_destruct */ sk->sk_family = PF_IEEE802154; /* Checksums on by default */ sock_set_flag(sk, SOCK_ZAPPED); if (sk->sk_prot->hash) sk->sk_prot->hash(sk); if (sk->sk_prot->init) { rc = sk->sk_prot->init(sk); if (rc) sk_common_release(sk); } out: return rc; } static const struct net_proto_family ieee802154_family_ops = { .family = PF_IEEE802154, .create = ieee802154_create, .owner = THIS_MODULE, }; static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { if (!netif_running(dev)) goto drop; pr_debug("got frame, type %d, dev %p\n", dev->type, dev); #ifdef DEBUG print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len); #endif if (!net_eq(dev_net(dev), &init_net)) goto drop; ieee802154_raw_deliver(dev, skb); if (dev->type != ARPHRD_IEEE802154) goto drop; if (skb->pkt_type != PACKET_OTHERHOST) return ieee802154_dgram_deliver(dev, skb); drop: kfree_skb(skb); return NET_RX_DROP; } static struct packet_type ieee802154_packet_type = { .type = __constant_htons(ETH_P_IEEE802154), .func = ieee802154_rcv, }; static int __init af_ieee802154_init(void) { int rc = -EINVAL; rc = proto_register(&ieee802154_raw_prot, 1); if (rc) goto out; rc = proto_register(&ieee802154_dgram_prot, 1); if (rc) goto err_dgram; /* Tell SOCKET that we are alive */ rc = sock_register(&ieee802154_family_ops); if (rc) goto 
err_sock; dev_add_pack(&ieee802154_packet_type); rc = 0; goto out; err_sock: proto_unregister(&ieee802154_dgram_prot); err_dgram: proto_unregister(&ieee802154_raw_prot); out: return rc; } static void __exit af_ieee802154_remove(void) { dev_remove_pack(&ieee802154_packet_type); sock_unregister(PF_IEEE802154); proto_unregister(&ieee802154_dgram_prot); proto_unregister(&ieee802154_raw_prot); } module_init(af_ieee802154_init); module_exit(af_ieee802154_remove); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_IEEE802154);
gpl-2.0
xsynergy510x/GPE_Kernel
drivers/scsi/bfa/bfa_hw_cb.c
9897
4748
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include "bfad_drv.h" #include "bfa_modules.h" #include "bfi_reg.h" void bfa_hwcb_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); int fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN0_INT_MSK); } else { bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS); bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK); } } static void bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) { writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq), bfa->iocfc.bfa_regs.intr_status); } /* * Actions to respond RME Interrupt for Crossbow ASIC: * - Write 1 to Interrupt Status register * INTX - done in bfa_intx() * MSIX - done in bfa_hwcb_rspq_ack_msix() * - Update CI (only if new CI) */ static void bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci) { writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), bfa->iocfc.bfa_regs.intr_status); if (bfa_rspq_ci(bfa, rspq) == ci) return; bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); mmiowb(); } void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci) { if (bfa_rspq_ci(bfa, rspq) == ci) return; bfa_rspq_ci(bfa, rspq) = ci; writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); mmiowb(); } void bfa_hwcb_msix_getvecs(struct bfa_s 
*bfa, u32 *msix_vecs_bmap, u32 *num_vecs, u32 *max_vec_bit) { #define __HFN_NUMINTS 13 if (bfa_ioc_pcifn(&bfa->ioc) == 0) { *msix_vecs_bmap = (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0); *max_vec_bit = __HFN_INT_MBOX_LPU0; } else { *msix_vecs_bmap = (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1); *max_vec_bit = __HFN_INT_MBOX_LPU1; } *msix_vecs_bmap |= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); *num_vecs = __HFN_NUMINTS; } /* * Dummy interrupt handler for handling spurious interrupts. */ static void bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec) { } /* * No special setup required for crossbow -- vector assignments are implicit. */ void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) { WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS)); bfa->msix.nvecs = nvecs; bfa_hwcb_msix_uninstall(bfa); } void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa) { int i; if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) { for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_all; return; } for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = bfa_msix_lpu_err; } void bfa_hwcb_msix_queue_install(struct bfa_s *bfa) { int i; if (bfa->msix.nvecs == 0) return; if (bfa->msix.nvecs == 1) { for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) bfa->msix.handler[i] = bfa_msix_all; return; } for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++) bfa->msix.handler[i] = bfa_msix_reqq; for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++) bfa->msix.handler[i] = bfa_msix_rspq; } void bfa_hwcb_msix_uninstall(struct bfa_s *bfa) { int i; for (i = 0; i < BFI_MSIX_CB_MAX; i++) bfa->msix.handler[i] = 
bfa_hwcb_msix_dummy; } /* * No special enable/disable -- vector assignments are implicit. */ void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { if (msix) { bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; } else { bfa->iocfc.hwif.hw_reqq_ack = NULL; bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; } } void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end) { *start = BFI_MSIX_RME_QMIN_CB; *end = BFI_MSIX_RME_QMAX_CB; }
gpl-2.0
wrongway801/N900T_Kernel
net/netfilter/nf_tproxy_core.c
10409
1415
/* * Transparent proxy support for Linux/iptables * * Copyright (c) 2006-2007 BalaBit IT Ltd. * Author: Balazs Scheidler, Krisztian Kovacs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/net.h> #include <linux/if.h> #include <linux/netdevice.h> #include <net/udp.h> #include <net/netfilter/nf_tproxy_core.h> static void nf_tproxy_destructor(struct sk_buff *skb) { struct sock *sk = skb->sk; skb->sk = NULL; skb->destructor = NULL; if (sk) sock_put(sk); } /* consumes sk */ void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) { /* assigning tw sockets complicates things; most * skb->sk->X checks would have to test sk->sk_state first */ if (sk->sk_state == TCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } skb_orphan(skb); skb->sk = sk; skb->destructor = nf_tproxy_destructor; } EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); static int __init nf_tproxy_init(void) { pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n"); pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n"); return 0; } module_init(nf_tproxy_init); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Krisztian Kovacs"); MODULE_DESCRIPTION("Transparent proxy support core routines");
gpl-2.0
jsnmarek/kernel_samsung_jf
arch/frv/kernel/sysctl.c
10665
4461
/* sysctl.c: implementation of /proc/sys files relating to FRV specifically * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sysctl.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <asm/uaccess.h> static const char frv_cache_wback[] = "wback"; static const char frv_cache_wthru[] = "wthru"; static void frv_change_dcache_mode(unsigned long newmode) { unsigned long flags, hsr0; local_irq_save(flags); hsr0 = __get_HSR(0); hsr0 &= ~HSR0_DCE; __set_HSR(0, hsr0); asm volatile(" dcef @(gr0,gr0),#1 \n" " membar \n" : : : "memory" ); hsr0 = (hsr0 & ~HSR0_CBM) | newmode; __set_HSR(0, hsr0); hsr0 |= HSR0_DCE; __set_HSR(0, hsr0); local_irq_restore(flags); //printk("HSR0 now %08lx\n", hsr0); } /*****************************************************************************/ /* * handle requests to dynamically switch the write caching mode delivered by /proc */ static int procctl_frv_cachemode(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { unsigned long hsr0; char buff[8]; int len; len = *lenp; if (write) { /* potential state change */ if (len <= 1 || len > sizeof(buff) - 1) return -EINVAL; if (copy_from_user(buff, buffer, len) != 0) return -EFAULT; if (buff[len - 1] == '\n') buff[len - 1] = '\0'; else buff[len] = '\0'; if (strcmp(buff, frv_cache_wback) == 0) { /* switch dcache into write-back mode */ frv_change_dcache_mode(HSR0_CBM_COPY_BACK); return 0; } if (strcmp(buff, frv_cache_wthru) == 0) { /* switch dcache into write-through mode */ frv_change_dcache_mode(HSR0_CBM_WRITE_THRU); return 0; } return -EINVAL; } /* read the state */ if (*ppos > 0) { *lenp = 0; return 0; } hsr0 = __get_HSR(0); switch (hsr0 & 
HSR0_CBM) { case HSR0_CBM_WRITE_THRU: memcpy(buff, frv_cache_wthru, sizeof(frv_cache_wthru) - 1); buff[sizeof(frv_cache_wthru) - 1] = '\n'; len = sizeof(frv_cache_wthru); break; default: memcpy(buff, frv_cache_wback, sizeof(frv_cache_wback) - 1); buff[sizeof(frv_cache_wback) - 1] = '\n'; len = sizeof(frv_cache_wback); break; } if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buff, len) != 0) return -EFAULT; *lenp = len; *ppos = len; return 0; } /* end procctl_frv_cachemode() */ /*****************************************************************************/ /* * permit the mm_struct the nominated process is using have its MMU context ID pinned */ #ifdef CONFIG_MMU static int procctl_frv_pin_cxnr(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { pid_t pid; char buff[16], *p; int len; len = *lenp; if (write) { /* potential state change */ if (len <= 1 || len > sizeof(buff) - 1) return -EINVAL; if (copy_from_user(buff, buffer, len) != 0) return -EFAULT; if (buff[len - 1] == '\n') buff[len - 1] = '\0'; else buff[len] = '\0'; pid = simple_strtoul(buff, &p, 10); if (*p) return -EINVAL; return cxn_pin_by_pid(pid); } /* read the currently pinned CXN */ if (*ppos > 0) { *lenp = 0; return 0; } len = snprintf(buff, sizeof(buff), "%d\n", cxn_pinned); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buff, len) != 0) return -EFAULT; *lenp = len; *ppos = len; return 0; } /* end procctl_frv_pin_cxnr() */ #endif /* * FR-V specific sysctls */ static struct ctl_table frv_table[] = { { .procname = "cache-mode", .data = NULL, .maxlen = 0, .mode = 0644, .proc_handler = procctl_frv_cachemode, }, #ifdef CONFIG_MMU { .procname = "pin-cxnr", .data = NULL, .maxlen = 0, .mode = 0644, .proc_handler = procctl_frv_pin_cxnr }, #endif {} }; /* * Use a temporary sysctl number. Horrid, but will be cleaned up in 2.6 * when all the PM interfaces exist nicely. 
*/ static struct ctl_table frv_dir_table[] = { { .procname = "frv", .mode = 0555, .child = frv_table }, {} }; /* * Initialize power interface */ static int __init frv_sysctl_init(void) { register_sysctl_table(frv_dir_table); return 0; } __initcall(frv_sysctl_init);
gpl-2.0
ajopanoor/mic_host_os
net/ax25/af_ax25.c
426
44908
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Darryl Miles G7LED (dlm@g7led.demon.co.uk) * Copyright (C) Steven Whitehouse GW7RRM (stevew@acm.org) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de) * Copyright (C) Hans Alblas PE1AYX (hans@esrac.ele.tue.nl) * Copyright (C) Frederic Rible F1OAT (frible@teaser.fr) */ #include <linux/capability.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/sysctl.h> #include <linux/init.h> #include <linux/spinlock.h> #include <net/net_namespace.h> #include <net/tcp_states.h> #include <net/ip.h> #include <net/arp.h> HLIST_HEAD(ax25_list); DEFINE_SPINLOCK(ax25_list_lock); static const struct proto_ops ax25_proto_ops; static void ax25_free_sock(struct sock *sk) { ax25_cb_put(sk_to_ax25(sk)); } /* * Socket removal during an interrupt is now safe. 
*/ static void ax25_cb_del(ax25_cb *ax25) { if (!hlist_unhashed(&ax25->ax25_node)) { spin_lock_bh(&ax25_list_lock); hlist_del_init(&ax25->ax25_node); spin_unlock_bh(&ax25_list_lock); ax25_cb_put(ax25); } } /* * Kill all bound sockets on a dropped device. */ static void ax25_kill_by_device(struct net_device *dev) { ax25_dev *ax25_dev; ax25_cb *s; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; spin_lock_bh(&ax25_list_lock); again: ax25_for_each(s, &ax25_list) { if (s->ax25_dev == ax25_dev) { s->ax25_dev = NULL; spin_unlock_bh(&ax25_list_lock); ax25_disconnect(s, ENETUNREACH); spin_lock_bh(&ax25_list_lock); /* The entry could have been deleted from the * list meanwhile and thus the next pointer is * no longer valid. Play it safe and restart * the scan. Forward progress is ensured * because we set s->ax25_dev to NULL and we * are never passed a NULL 'dev' argument. */ goto again; } } spin_unlock_bh(&ax25_list_lock); } /* * Handle device status changes. */ static int ax25_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; /* Reject non AX.25 devices */ if (dev->type != ARPHRD_AX25) return NOTIFY_DONE; switch (event) { case NETDEV_UP: ax25_dev_device_up(dev); break; case NETDEV_DOWN: ax25_kill_by_device(dev); ax25_rt_device_down(dev); ax25_dev_device_down(dev); break; default: break; } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. */ void ax25_cb_add(ax25_cb *ax25) { spin_lock_bh(&ax25_list_lock); ax25_cb_hold(ax25); hlist_add_head(&ax25->ax25_node, &ax25_list); spin_unlock_bh(&ax25_list_lock); } /* * Find a socket that wants to accept the SABM we have just * received. 
*/ struct sock *ax25_find_listener(ax25_address *addr, int digi, struct net_device *dev, int type) { ax25_cb *s; spin_lock(&ax25_list_lock); ax25_for_each(s, &ax25_list) { if ((s->iamdigi && !digi) || (!s->iamdigi && digi)) continue; if (s->sk && !ax25cmp(&s->source_addr, addr) && s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) { /* If device is null we match any device */ if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) { sock_hold(s->sk); spin_unlock(&ax25_list_lock); return s->sk; } } } spin_unlock(&ax25_list_lock); return NULL; } /* * Find an AX.25 socket given both ends. */ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr, int type) { struct sock *sk = NULL; ax25_cb *s; spin_lock(&ax25_list_lock); ax25_for_each(s, &ax25_list) { if (s->sk && !ax25cmp(&s->source_addr, my_addr) && !ax25cmp(&s->dest_addr, dest_addr) && s->sk->sk_type == type) { sk = s->sk; sock_hold(sk); break; } } spin_unlock(&ax25_list_lock); return sk; } /* * Find an AX.25 control block given both ends. It will only pick up * floating AX.25 control blocks or non Raw socket bound control blocks. 
 */
/* Returns the matching control block with a reference held
 * (caller must ax25_cb_put()), or NULL.  An empty/NULL digi list only
 * matches cbs without digipeaters, and vice versa. */
ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
	ax25_digi *digi, struct net_device *dev)
{
	ax25_cb *s;

	spin_lock_bh(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
			continue;
		if (s->ax25_dev == NULL)
			continue;
		if (ax25cmp(&s->source_addr, src_addr) == 0 &&
		    ax25cmp(&s->dest_addr, dest_addr) == 0 &&
		    s->ax25_dev->dev == dev) {
			if (digi != NULL && digi->ndigi != 0) {
				if (s->digipeat == NULL)
					continue;
				if (ax25digicmp(s->digipeat, digi) != 0)
					continue;
			} else {
				if (s->digipeat != NULL && s->digipeat->ndigi != 0)
					continue;
			}
			ax25_cb_hold(s);
			spin_unlock_bh(&ax25_list_lock);
			return s;
		}
	}
	spin_unlock_bh(&ax25_list_lock);

	return NULL;
}

EXPORT_SYMBOL(ax25_find_cb);

/* Clone skb to every raw socket bound to addr/proto on skb's device. */
void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
{
	ax25_cb *s;
	struct sk_buff *copy;

	spin_lock(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		/* NOTE(review): s->ax25_dev is dereferenced without a NULL
		 * check here, yet ax25_kill_by_device() can set it to NULL —
		 * confirm this cannot race. */
		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
		    s->sk->sk_type == SOCK_RAW &&
		    s->sk->sk_protocol == proto &&
		    s->ax25_dev->dev == skb->dev &&
		    atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
			if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
				continue;
			if (sock_queue_rcv_skb(s->sk, copy) != 0)
				kfree_skb(copy);
		}
	}
	spin_unlock(&ax25_list_lock);
}

/*
 *	Deferred destroy.
 */
void ax25_destroy_socket(ax25_cb *);

/*
 *	Handler for deferred kills.
 */
static void ax25_destroy_timer(unsigned long data)
{
	ax25_cb *ax25=(ax25_cb *)data;
	struct sock *sk;

	sk=ax25->sk;

	bh_lock_sock(sk);
	sock_hold(sk);
	ax25_destroy_socket(ax25);
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 */
/* Tear down a control block: unlink it, stop all timers, flush queues and
 * orphan any not-yet-accepted child sockets parked on the receive queue.
 * If the socket still owns buffers, destruction is retried from a timer. */
void ax25_destroy_socket(ax25_cb *ax25)
{
	struct sk_buff *skb;

	ax25_cb_del(ax25);

	ax25_stop_heartbeat(ax25);
	ax25_stop_t1timer(ax25);
	ax25_stop_t2timer(ax25);
	ax25_stop_t3timer(ax25);
	ax25_stop_idletimer(ax25);

	ax25_clear_queues(ax25);	/* Flush the queues */

	if (ax25->sk != NULL) {
		while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
			if (skb->sk != ax25->sk) {
				/* A pending connection */
				ax25_cb *sax25 = sk_to_ax25(skb->sk);

				/* Queue the unaccepted socket for death */
				sock_orphan(skb->sk);

				/* 9A4GL: hack to release unaccepted sockets */
				skb->sk->sk_state = TCP_LISTEN;

				ax25_start_heartbeat(sax25);
				sax25->state = AX25_STATE_0;
			}

			kfree_skb(skb);
		}
		skb_queue_purge(&ax25->sk->sk_write_queue);
	}

	if (ax25->sk != NULL) {
		if (sk_has_allocations(ax25->sk)) {
			/* Defer: outstanding buffers */
			setup_timer(&ax25->dtimer, ax25_destroy_timer,
					(unsigned long)ax25);
			ax25->dtimer.expires  = jiffies + 2 * HZ;
			add_timer(&ax25->dtimer);
		} else {
			struct sock *sk=ax25->sk;
			ax25->sk=NULL;
			sock_put(sk);
		}
	} else {
		ax25_cb_put(ax25);
	}
}

/*
 * dl1bke 960311: set parameters for existing AX.25 connections,
 * includes a KILL command to abort any connection.
 * VERY useful for debugging ;-)
 */
/* SIOCAX25CTLCON handler: look up the connection named by the user-supplied
 * ax25_ctl_struct and apply the requested command/parameter change.
 * Returns 0 or a negative errno. */
static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
{
	struct ax25_ctl_struct ax25_ctl;
	ax25_digi digi;
	ax25_dev *ax25_dev;
	ax25_cb *ax25;
	unsigned int k;
	int ret = 0;

	if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
		return -EFAULT;

	if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL)
		return -ENODEV;

	if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
		return -EINVAL;

	/* Early overflow guard: arg is later multiplied by HZ (except for
	 * AX25_KILL, which ignores arg). */
	if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
		return -EINVAL;

	digi.ndigi = ax25_ctl.digi_count;
	for (k = 0; k < digi.ndigi; k++)
		digi.calls[k] = ax25_ctl.digi_addr[k];

	/* ax25_find_cb() returns the cb with a reference held; every exit
	 * below must go through out_put. */
	if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL)
		return -ENOTCONN;

	switch (ax25_ctl.cmd) {
	case AX25_KILL:
		ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
#ifdef CONFIG_AX25_DAMA_SLAVE
		if (ax25_dev->dama.slave && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
			ax25_dama_off(ax25);
#endif
		ax25_disconnect(ax25, ENETRESET);
		break;

	case AX25_WINDOW:
		/* Window range depends on the sequence-number modulus. */
		if (ax25->modulus == AX25_MODULUS) {
			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
				goto einval_put;
		} else {
			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
				goto einval_put;
		}
		ax25->window = ax25_ctl.arg;
		break;

	case AX25_T1:
		if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
			goto einval_put;
		ax25->rtt = (ax25_ctl.arg * HZ) / 2;
		ax25->t1 = ax25_ctl.arg * HZ;
		break;

	case AX25_T2:
		if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
			goto einval_put;
		ax25->t2 = ax25_ctl.arg * HZ;
		break;

	case AX25_N2:
		if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
			goto einval_put;
		ax25->n2count = 0;
		ax25->n2 = ax25_ctl.arg;
		break;

	case AX25_T3:
		if (ax25_ctl.arg > ULONG_MAX / HZ)
			goto einval_put;
		ax25->t3 = ax25_ctl.arg * HZ;
		break;

	case AX25_IDLE:
		/* idle is stored in minutes worth of jiffies. */
		if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
			goto einval_put;
		ax25->idle = ax25_ctl.arg * 60 * HZ;
		break;

	case AX25_PACLEN:
		if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
			goto einval_put;
		ax25->paclen = ax25_ctl.arg;
		break;

	default:
		goto einval_put;
	}

out_put:
	ax25_cb_put(ax25);
	return ret;

einval_put:
	ret = -EINVAL;
	goto out_put;
}

/* Copy per-device default timer/protocol parameters into a cb. */
static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
{
	ax25->rtt     = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
	ax25->t1      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
	ax25->t2      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T2]);
	ax25->t3      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T3]);
	ax25->n2      = ax25_dev->values[AX25_VALUES_N2];
	ax25->paclen  = ax25_dev->values[AX25_VALUES_PACLEN];
	ax25->idle    = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_IDLE]);
	ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF];

	if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}
}

/*
 * Fill in a created AX.25 created control block with the default
 * values for a particular device.
 */
void ax25_fillin_cb(ax25_cb *ax25, ax25_dev *ax25_dev)
{
	ax25->ax25_dev = ax25_dev;

	if (ax25->ax25_dev != NULL) {
		ax25_fillin_cb_from_dev(ax25, ax25_dev);
		return;
	}

	/*
	 * No device, use kernel / AX.25 spec default values
	 */
	ax25->rtt     = msecs_to_jiffies(AX25_DEF_T1) / 2;
	ax25->t1      = msecs_to_jiffies(AX25_DEF_T1);
	ax25->t2      = msecs_to_jiffies(AX25_DEF_T2);
	ax25->t3      = msecs_to_jiffies(AX25_DEF_T3);
	ax25->n2      = AX25_DEF_N2;
	ax25->paclen  = AX25_DEF_PACLEN;
	ax25->idle    = msecs_to_jiffies(AX25_DEF_IDLE);
	ax25->backoff = AX25_DEF_BACKOFF;

	if (AX25_DEF_AXDEFMODE) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = AX25_DEF_EWINDOW;
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = AX25_DEF_WINDOW;
	}
}

/*
 * Create an empty AX.25 control block.
 */
/* Allocate and initialise a cb with refcount 1, empty queues, timers set up
 * and kernel-default parameters (no device yet).  Returns NULL on OOM. */
ax25_cb *ax25_create_cb(void)
{
	ax25_cb *ax25;

	if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
		return NULL;

	atomic_set(&ax25->refcount, 1);

	skb_queue_head_init(&ax25->write_queue);
	skb_queue_head_init(&ax25->frag_queue);
	skb_queue_head_init(&ax25->ack_queue);
	skb_queue_head_init(&ax25->reseq_queue);

	ax25_setup_timers(ax25);

	ax25_fillin_cb(ax25, NULL);

	ax25->state = AX25_STATE_0;

	return ax25;
}

/*
 * Handling for system calls applied via the various interfaces to an
 * AX25 socket object
 */

static int ax25_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	ax25_cb *ax25;
	struct net_device *dev;
	char devname[IFNAMSIZ];
	unsigned long opt;
	int res = 0;

	if (level != SOL_AX25)
		return -ENOPROTOOPT;

	if (optlen < sizeof(unsigned int))
		return -EINVAL;

	if (get_user(opt, (unsigned int __user *)optval))
		return -EFAULT;

	lock_sock(sk);
	ax25 = sk_to_ax25(sk);

	switch (optname) {
	case AX25_WINDOW:
		/* Valid window range depends on the modulus in use. */
		if (ax25->modulus == AX25_MODULUS) {
			if (opt < 1 || opt > 7) {
				res = -EINVAL;
				break;
			}
		} else {
			if (opt < 1 || opt > 63) {
				res = -EINVAL;
				break;
			}
		}
		ax25->window = opt;
		break;

	case AX25_T1:
		if (opt < 1 || opt > ULONG_MAX / HZ) {
			res = -EINVAL;
			break;
		}
		/* Initial smoothed round-trip estimate is T1/2. */
		ax25->rtt = (opt * HZ) >> 1;
		ax25->t1 = opt * HZ;
		break;

	case AX25_T2:
		if (opt < 1 || opt > ULONG_MAX / HZ) {
			res = -EINVAL;
			break;
		}
		ax25->t2 = opt * HZ;
		break;

	case AX25_N2:
		if (opt < 1 || opt > 31) {
			res = -EINVAL;
			break;
		}
		ax25->n2 = opt;
		break;

	case AX25_T3:
		if (opt < 1 || opt > ULONG_MAX / HZ) {
			res = -EINVAL;
			break;
		}
		ax25->t3 = opt * HZ;
		break;

	case AX25_IDLE:
		if (opt > ULONG_MAX / (60 * HZ)) {
			res = -EINVAL;
			break;
		}
		ax25->idle = opt * 60 * HZ;
		break;

	case AX25_BACKOFF:
		if (opt > 2) {
			res = -EINVAL;
			break;
		}
		ax25->backoff = opt;
		break;

	case AX25_EXTSEQ:
		ax25->modulus = opt ? AX25_EMODULUS : AX25_MODULUS;
		break;

	case AX25_PIDINCL:
		ax25->pidincl = opt ? 1 : 0;
		break;

	case AX25_IAMDIGI:
		ax25->iamdigi = opt ? 1 : 0;
		break;

	case AX25_PACLEN:
		if (opt < 16 || opt > 65535) {
			res = -EINVAL;
			break;
		}
		ax25->paclen = opt;
		break;

	case SO_BINDTODEVICE:
		if (optlen > IFNAMSIZ)
			optlen = IFNAMSIZ;

		if (copy_from_user(devname, optval, optlen)) {
			res = -EFAULT;
			break;
		}

		/* Can't rebind a connecting/listening seqpacket socket. */
		if (sk->sk_type == SOCK_SEQPACKET &&
		    (sock->state != SS_UNCONNECTED ||
		     sk->sk_state == TCP_LISTEN)) {
			res = -EADDRNOTAVAIL;
			break;
		}

		dev = dev_get_by_name(&init_net, devname);
		if (!dev) {
			res = -ENODEV;
			break;
		}

		ax25->ax25_dev = ax25_dev_ax25dev(dev);
		ax25_fillin_cb(ax25, ax25->ax25_dev);
		dev_put(dev);
		break;

	default:
		res = -ENOPROTOOPT;
	}
	release_sock(sk);

	return res;
}

static int ax25_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	ax25_cb *ax25;
	struct ax25_dev *ax25_dev;
	char devname[IFNAMSIZ];
	void *valptr;
	int val = 0;
	int maxlen, length;

	if (level != SOL_AX25)
		return -ENOPROTOOPT;

	if (get_user(maxlen, optlen))
		return -EFAULT;

	if (maxlen < 1)
		return -EFAULT;

	/* Most options return a plain int; SO_BINDTODEVICE repoints valptr
	 * at the device name below. */
	valptr = (void *) &val;
	length = min_t(unsigned int, maxlen, sizeof(int));

	lock_sock(sk);
	ax25 = sk_to_ax25(sk);

	switch (optname) {
	case AX25_WINDOW:
		val = ax25->window;
		break;

	case AX25_T1:
		val = ax25->t1 / HZ;
		break;

	case AX25_T2:
		val = ax25->t2 / HZ;
		break;

	case AX25_N2:
		val = ax25->n2;
		break;

	case AX25_T3:
		val = ax25->t3 / HZ;
		break;

	case AX25_IDLE:
		val = ax25->idle / (60 * HZ);
		break;

	case AX25_BACKOFF:
		val = ax25->backoff;
		break;

	case AX25_EXTSEQ:
		val = (ax25->modulus == AX25_EMODULUS);
		break;

	case AX25_PIDINCL:
		val = ax25->pidincl;
		break;

	case AX25_IAMDIGI:
		val = ax25->iamdigi;
		break;

	case AX25_PACLEN:
		val = ax25->paclen;
		break;

	case SO_BINDTODEVICE:
		ax25_dev = ax25->ax25_dev;

		if (ax25_dev != NULL && ax25_dev->dev != NULL) {
			strlcpy(devname, ax25_dev->dev->name, sizeof(devname));
			length = strlen(devname) + 1;
		} else {
			*devname = '\0';
			length = 1;
		}
		valptr = (void *) devname;
		break;

	default:
		release_sock(sk);
		return -ENOPROTOOPT;
	}
	release_sock(sk);

	if (put_user(length,
	     optlen))
		return -EFAULT;

	return copy_to_user(optval, valptr, length) ? -EFAULT : 0;
}

/* Switch a seqpacket socket into the listening state. */
static int ax25_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int res = 0;

	lock_sock(sk);
	if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state           = TCP_LISTEN;
		goto out;
	}
	res = -EOPNOTSUPP;

out:
	release_sock(sk);

	return res;
}

/*
 * XXX: when creating ax25_sock we should update the .obj_size setting
 * below.
 */

static struct proto ax25_proto = {
	.name	  = "AX25",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct ax25_sock),
};

/* socket(2) backend: validate type/protocol, allocate the sock and its cb. */
static int ax25_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;
	ax25_cb *ax25;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		if (protocol == 0 || protocol == PF_AX25)
			protocol = AX25_P_TEXT;
		break;

	case SOCK_SEQPACKET:
		switch (protocol) {
		case 0:
		case PF_AX25:	/* For CLX */
			protocol = AX25_P_TEXT;
			break;
		case AX25_P_SEGMENT:
#ifdef CONFIG_INET
		case AX25_P_ARP:
		case AX25_P_IP:
#endif
#ifdef CONFIG_NETROM
		case AX25_P_NETROM:
#endif
#ifdef CONFIG_ROSE
		case AX25_P_ROSE:
#endif
			return -ESOCKTNOSUPPORT;
#ifdef CONFIG_NETROM_MODULE
		case AX25_P_NETROM:
			if (ax25_protocol_is_registered(AX25_P_NETROM))
				return -ESOCKTNOSUPPORT;
			break;
#endif
#ifdef CONFIG_ROSE_MODULE
		case AX25_P_ROSE:
			if (ax25_protocol_is_registered(AX25_P_ROSE))
				return -ESOCKTNOSUPPORT;
			/* fall through to default */
#endif
		default:
			break;
		}
		break;

	case SOCK_RAW:
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	ax25 = ax25_sk(sk)->cb = ax25_create_cb();
	if (!ax25) {
		sk_free(sk);
		return -ENOMEM;
	}

	sock_init_data(sock, sk);

	sk->sk_destruct = ax25_free_sock;
	sock->ops    = &ax25_proto_ops;
	sk->sk_protocol = protocol;

	ax25->sk    = sk;

	return 0;
}

/* Build the child socket for an incoming connection, copying the listener's
 * parameters and digipeater list.  Returns NULL on failure. */
struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
{
	struct sock *sk;
	ax25_cb *ax25, *oax25;

	sk = sk_alloc(sock_net(osk), PF_AX25,
		      GFP_ATOMIC, osk->sk_prot, 0);
	if (sk == NULL)
		return NULL;

	if ((ax25 = ax25_create_cb()) == NULL) {
		sk_free(sk);
		return NULL;
	}

	switch (osk->sk_type) {
	case SOCK_DGRAM:
		break;
	case SOCK_SEQPACKET:
		break;
	default:
		sk_free(sk);
		ax25_cb_put(ax25);
		return NULL;
	}

	sock_init_data(NULL, sk);

	/* Inherit the listener's socket-level settings. */
	sk->sk_type     = osk->sk_type;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf   = osk->sk_rcvbuf;
	sk->sk_sndbuf   = osk->sk_sndbuf;
	sk->sk_state    = TCP_ESTABLISHED;
	sock_copy_flags(sk, osk);

	oax25 = sk_to_ax25(osk);

	/* Inherit the listener's AX.25 protocol parameters. */
	ax25->modulus = oax25->modulus;
	ax25->backoff = oax25->backoff;
	ax25->pidincl = oax25->pidincl;
	ax25->iamdigi = oax25->iamdigi;
	ax25->rtt     = oax25->rtt;
	ax25->t1      = oax25->t1;
	ax25->t2      = oax25->t2;
	ax25->t3      = oax25->t3;
	ax25->n2      = oax25->n2;
	ax25->idle    = oax25->idle;
	ax25->paclen  = oax25->paclen;
	ax25->window  = oax25->window;

	ax25->ax25_dev    = ax25_dev;
	ax25->source_addr = oax25->source_addr;

	if (oax25->digipeat != NULL) {
		ax25->digipeat = kmemdup(oax25->digipeat, sizeof(ax25_digi),
					 GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			sk_free(sk);
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	ax25_sk(sk)->cb = ax25;
	sk->sk_destruct = ax25_free_sock;
	ax25->sk    = sk;

	return sk;
}

/* close(2) backend: initiate an orderly LAPB disconnect for connected
 * seqpacket sockets, destroy everything else immediately. */
static int ax25_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	ax25_cb *ax25;

	if (sk == NULL)
		return 0;

	sock_hold(sk);
	sock_orphan(sk);
	lock_sock(sk);
	ax25 = sk_to_ax25(sk);

	if (sk->sk_type == SOCK_SEQPACKET) {
		switch (ax25->state) {
		case AX25_STATE_0:
			release_sock(sk);
			ax25_disconnect(ax25, 0);
			lock_sock(sk);
			ax25_destroy_socket(ax25);
			break;

		case AX25_STATE_1:
		case AX25_STATE_2:
			ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
			release_sock(sk);
			ax25_disconnect(ax25, 0);
			lock_sock(sk);
			ax25_destroy_socket(ax25);
			break;

		case AX25_STATE_3:
		case AX25_STATE_4:
			ax25_clear_queues(ax25);
			ax25->n2count = 0;

			switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
			case AX25_PROTO_STD_SIMPLEX:
			case AX25_PROTO_STD_DUPLEX:
				ax25_send_control(ax25, AX25_DISC, AX25_POLLON,
						  AX25_COMMAND);
				ax25_stop_t2timer(ax25);
				ax25_stop_t3timer(ax25);
				ax25_stop_idletimer(ax25);
				break;
#ifdef CONFIG_AX25_DAMA_SLAVE
			case AX25_PROTO_DAMA_SLAVE:
				ax25_stop_t3timer(ax25);
				ax25_stop_idletimer(ax25);
				break;
#endif
			}
			/* Wait in STATE_2 for the peer's UA/DM; the cb is
			 * destroyed later when the disconnect completes. */
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			ax25->state = AX25_STATE_2;
			sk->sk_state = TCP_CLOSE;
			sk->sk_shutdown |= SEND_SHUTDOWN;
			sk->sk_state_change(sk);
			sock_set_flag(sk, SOCK_DESTROY);
			break;

		default:
			break;
		}
	} else {
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		ax25_destroy_socket(ax25);
	}

	sock->sk = NULL;
	release_sock(sk);
	sock_put(sk);

	return 0;
}

/*
 *	We support a funny extension here so you can (as root) give any callsign
 *	digipeated via a local address as source. This hack is obsolete now
 *	that we've implemented support for SO_BINDTODEVICE. It is however small
 *	and trivially backward compatible.
 */
static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
	ax25_dev *ax25_dev = NULL;
	ax25_uid_assoc *user;
	ax25_address call;
	ax25_cb *ax25;
	int err = 0;

	if (addr_len != sizeof(struct sockaddr_ax25) &&
	    addr_len != sizeof(struct full_sockaddr_ax25))
		/* support for old structure may go away some time
		 * ax25_bind(): uses old (6 digipeater) socket structure.
		 */
		if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
		    (addr_len > sizeof(struct full_sockaddr_ax25)))
			return -EINVAL;

	if (addr->fsa_ax25.sax25_family != AF_AX25)
		return -EINVAL;

	/* If a uid->callsign mapping exists it overrides the requested
	 * callsign; otherwise policy may require CAP_NET_ADMIN. */
	user = ax25_findbyuid(current_euid());
	if (user) {
		call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_ADMIN))
			return -EACCES;

		call = addr->fsa_ax25.sax25_call;
	}

	lock_sock(sk);

	ax25 = sk_to_ax25(sk);
	if (!sock_flag(sk, SOCK_ZAPPED)) {
		err = -EINVAL;
		goto out;
	}

	ax25->source_addr = call;

	/*
	 * User already set interface with SO_BINDTODEVICE
	 */
	if (ax25->ax25_dev != NULL)
		goto done;

	if (addr_len > sizeof(struct sockaddr_ax25) && addr->fsa_ax25.sax25_ndigis == 1) {
		/* One "digipeater" entry here actually names the device
		 * address to bind to (legacy extension, see above). */
		if (ax25cmp(&addr->fsa_digipeater[0], &null_ax25_address) != 0 &&
		    (ax25_dev = ax25_addr_ax25dev(&addr->fsa_digipeater[0])) == NULL) {
			err = -EADDRNOTAVAIL;
			goto out;
		}
	} else {
		if ((ax25_dev = ax25_addr_ax25dev(&addr->fsa_ax25.sax25_call)) == NULL) {
			err = -EADDRNOTAVAIL;
			goto out;
		}
	}

	if (ax25_dev != NULL)
		ax25_fillin_cb(ax25, ax25_dev);

done:
	ax25_cb_add(ax25);
	sock_reset_flag(sk, SOCK_ZAPPED);

out:
	release_sock(sk);

	return err;
}

/*
 *	FIXME: nonblock behaviour looks like it may have a bug.
 */
static int __must_check ax25_connect(struct socket *sock,
	struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	ax25_cb *ax25 = sk_to_ax25(sk), *ax25t;
	struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
	ax25_digi *digi = NULL;
	int ct = 0, err = 0;

	/*
	 * some sanity checks. code further down depends on this
	 */

	if (addr_len == sizeof(struct sockaddr_ax25))
		/* support for this will go away in early 2.5.x
		 * ax25_connect(): uses obsolete socket structure
		 */
		;
	else if (addr_len != sizeof(struct full_sockaddr_ax25))
		/* support for old structure may go away some time
		 * ax25_connect(): uses old (6 digipeater) socket structure.
		 */
		if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
		    (addr_len > sizeof(struct full_sockaddr_ax25)))
			return -EINVAL;

	if (fsa->fsa_ax25.sax25_family != AF_AX25)
		return -EINVAL;

	lock_sock(sk);

	/* deal with restarts */
	if (sock->state == SS_CONNECTING) {
		switch (sk->sk_state) {
		case TCP_SYN_SENT: /* still trying */
			err = -EINPROGRESS;
			goto out_release;

		case TCP_ESTABLISHED: /* connection established */
			sock->state = SS_CONNECTED;
			goto out_release;

		case TCP_CLOSE: /* connection refused */
			sock->state = SS_UNCONNECTED;
			err = -ECONNREFUSED;
			goto out_release;
		}
	}

	if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
		err = -EISCONN;	/* No reconnect on a seqpacket socket */
		goto out_release;
	}

	sk->sk_state   = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	/* Any previous digipeater list is replaced below. */
	kfree(ax25->digipeat);
	ax25->digipeat = NULL;

	/*
	 *	Handle digi-peaters to be used.
	 */
	if (addr_len > sizeof(struct sockaddr_ax25) &&
	    fsa->fsa_ax25.sax25_ndigis != 0) {
		/* Valid number of digipeaters ? */
		if (fsa->fsa_ax25.sax25_ndigis < 1 ||
		    fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
			err = -EINVAL;
			goto out_release;
		}

		if ((digi = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) {
			err = -ENOBUFS;
			goto out_release;
		}

		digi->ndigi      = fsa->fsa_ax25.sax25_ndigis;
		digi->lastrepeat = -1;

		while (ct < fsa->fsa_ax25.sax25_ndigis) {
			/* H bit set on a call means "already repeated" when
			 * we ourselves act as a digipeater. */
			if ((fsa->fsa_digipeater[ct].ax25_call[6] &
			     AX25_HBIT) && ax25->iamdigi) {
				digi->repeated[ct] = 1;
				digi->lastrepeat   = ct;
			} else {
				digi->repeated[ct] = 0;
			}
			digi->calls[ct] = fsa->fsa_digipeater[ct];
			ct++;
		}
	}

	/*
	 *	Must bind first - autobinding in this may or may not work. If
	 *	the socket is already bound, check to see if the device has
	 *	been filled in, error if it hasn't.
	 */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		/* check if we can remove this feature. It is broken.
		 */
		printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n",
			current->comm);
		if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) {
			kfree(digi);
			goto out_release;
		}

		ax25_fillin_cb(ax25, ax25->ax25_dev);
		ax25_cb_add(ax25);
	} else {
		if (ax25->ax25_dev == NULL) {
			kfree(digi);
			err = -EHOSTUNREACH;
			goto out_release;
		}
	}

	/* Refuse a duplicate connection on the same address pair/device. */
	if (sk->sk_type == SOCK_SEQPACKET &&
	    (ax25t=ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call,
			 digi, ax25->ax25_dev->dev))) {
		kfree(digi);
		err = -EADDRINUSE;		/* Already such a connection */
		ax25_cb_put(ax25t);
		goto out_release;
	}

	ax25->dest_addr = fsa->fsa_ax25.sax25_call;
	ax25->digipeat  = digi;

	/* First the easy one */
	if (sk->sk_type != SOCK_SEQPACKET) {
		sock->state = SS_CONNECTED;
		sk->sk_state   = TCP_ESTABLISHED;
		goto out_release;
	}

	/* Move to connecting socket, ax.25 lapb WAIT_UA.. */
	sock->state        = SS_CONNECTING;
	sk->sk_state          = TCP_SYN_SENT;

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25->ax25_dev->values[AX25_VALUES_WINDOW];
		if (ax25->ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
		err = -EINPROGRESS;
		goto out_release;
	}

	if (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		/* Sleep (interruptibly) until the handshake resolves. */
		for (;;) {
			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(current)) {
				release_sock(sk);
				schedule();
				lock_sock(sk);
				continue;
			}
			err = -ERESTARTSYS;
			break;
		}
		finish_wait(sk_sleep(sk), &wait);

		if (err)
			goto out_release;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Not in ABM, not in WAIT_UA -> failed */
		sock->state = SS_UNCONNECTED;
		err =
		      sock_error(sk);	/* Always set at this point */
		goto out_release;
	}

	sock->state = SS_CONNECTED;

	err = 0;
out_release:
	release_sock(sk);

	return err;
}

/* accept(2) backend: dequeue a ready child socket parked on the listener's
 * receive queue by the SABM handler and graft it onto newsock. */
static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sk_buff *skb;
	struct sock *newsk;
	DEFINE_WAIT(wait);
	struct sock *sk;
	int err = 0;

	if (sock->state != SS_UNCONNECTED)
		return -EINVAL;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/*
	 *	The read queue this time is holding sockets ready to use
	 *	hooked into the SABM we saved
	 */
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		if (flags & O_NONBLOCK) {
			err = -EWOULDBLOCK;
			break;
		}
		if (!signal_pending(current)) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(sk_sleep(sk), &wait);

	if (err)
		goto out;

	newsk		 = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->state = SS_CONNECTED;

out:
	release_sock(sk);

	return err;
}

/* getsockname/getpeername backend; peer selects which end is reported. */
static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr;
	struct sock *sk = sock->sk;
	unsigned char ndigi, i;
	ax25_cb *ax25;
	int err = 0;

	memset(fsa, 0, sizeof(*fsa));
	lock_sock(sk);
	ax25 = sk_to_ax25(sk);

	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			err = -ENOTCONN;
			goto out;
		}

		fsa->fsa_ax25.sax25_family = AF_AX25;
		fsa->fsa_ax25.sax25_call   = ax25->dest_addr;

		if (ax25->digipeat != NULL) {
			ndigi = ax25->digipeat->ndigi;
			fsa->fsa_ax25.sax25_ndigis = ndigi;
			for (i = 0; i < ndigi; i++)
				fsa->fsa_digipeater[i] =
						ax25->digipeat->calls[i];
		}
	} else {
		fsa->fsa_ax25.sax25_family = AF_AX25;
		fsa->fsa_ax25.sax25_call   = ax25->source_addr;
		/* The single "digipeater" slot reports the bound device's
		 * address (same legacy convention as ax25_bind()). */
		fsa->fsa_ax25.sax25_ndigis = 1;
		if (ax25->ax25_dev
		    != NULL) {
			memcpy(&fsa->fsa_digipeater[0],
			       ax25->ax25_dev->dev->dev_addr, AX25_ADDR_LEN);
		} else {
			fsa->fsa_digipeater[0] = null_ax25_address;
		}
	}
	*uaddr_len = sizeof (struct full_sockaddr_ax25);

out:
	release_sock(sk);

	return err;
}

/* sendmsg(2) backend: build an AX.25 frame from user data and either feed
 * it to the LAPB machine (seqpacket) or transmit it directly as UI. */
static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name);
	struct sock *sk = sock->sk;
	struct sockaddr_ax25 sax;
	struct sk_buff *skb;
	ax25_digi dtmp, *dp;
	ax25_cb *ax25;
	size_t size;
	int lv, err, addr_len = msg->msg_namelen;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	lock_sock(sk);
	ax25 = sk_to_ax25(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		err = -EPIPE;
		goto out;
	}

	if (ax25->ax25_dev == NULL) {
		err = -ENETUNREACH;
		goto out;
	}

	if (len > ax25->ax25_dev->dev->mtu) {
		err = -EMSGSIZE;
		goto out;
	}

	if (usax != NULL) {
		if (usax->sax25_family != AF_AX25) {
			err = -EINVAL;
			goto out;
		}

		if (addr_len == sizeof(struct sockaddr_ax25))
			/* ax25_sendmsg(): uses obsolete socket structure */
			;
		else if (addr_len != sizeof(struct full_sockaddr_ax25))
			/* support for old structure may go away some time
			 * ax25_sendmsg(): uses old (6 digipeater)
			 * socket structure.
			 */
			if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) ||
			    (addr_len > sizeof(struct full_sockaddr_ax25))) {
				err = -EINVAL;
				goto out;
			}


		if (addr_len > sizeof(struct sockaddr_ax25) && usax->sax25_ndigis != 0) {
			int ct           = 0;
			struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;

			/* Valid number of digipeaters ?
			 */
			if (usax->sax25_ndigis < 1 ||
			    usax->sax25_ndigis > AX25_MAX_DIGIS) {
				err = -EINVAL;
				goto out;
			}

			dtmp.ndigi      = usax->sax25_ndigis;

			while (ct < usax->sax25_ndigis) {
				dtmp.repeated[ct] = 0;
				dtmp.calls[ct]    = fsa->fsa_digipeater[ct];
				ct++;
			}

			dtmp.lastrepeat = 0;
		}

		sax = *usax;
		/* A connected seqpacket socket may only send to its peer. */
		if (sk->sk_type == SOCK_SEQPACKET &&
		    ax25cmp(&ax25->dest_addr, &sax.sax25_call)) {
			err = -EISCONN;
			goto out;
		}
		if (usax->sax25_ndigis == 0)
			dp = NULL;
		else
			dp = &dtmp;
	} else {
		/*
		 *	FIXME: 1003.1g - if the socket is like this because
		 *	it has become closed (not started closed) and is VC
		 *	we ought to SIGPIPE, EPIPE
		 */
		if (sk->sk_state != TCP_ESTABLISHED) {
			err = -ENOTCONN;
			goto out;
		}
		sax.sax25_family = AF_AX25;
		sax.sax25_call   = ax25->dest_addr;
		dp = ax25->digipeat;
	}

	/* Build a packet */
	/* Assume the worst case */
	size = len + ax25->ax25_dev->dev->hard_header_len;

	skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	skb_reserve(skb, size - len);

	/* User data follows immediately after the AX.25 data */
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		kfree_skb(skb);
		goto out;
	}

	skb_reset_network_header(skb);

	/* Add the PID if one is not supplied by the user in the skb */
	if (!ax25->pidincl)
		*skb_push(skb, 1) = sk->sk_protocol;

	if (sk->sk_type == SOCK_SEQPACKET) {
		/* Connected mode sockets go via the LAPB machine */
		if (sk->sk_state != TCP_ESTABLISHED) {
			kfree_skb(skb);
			err = -ENOTCONN;
			goto out;
		}

		/* Shove it onto the queue and kick */
		ax25_output(ax25, ax25->paclen, skb);

		err = len;
		goto out;
	}

	skb_push(skb, 1 + ax25_addr_size(dp));

	/* Building AX.25 Header */

	/* Build an AX.25 header */
	lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
			     dp, AX25_COMMAND, AX25_MODULUS);

	skb_set_transport_header(skb, lv);

	*skb_transport_header(skb) = AX25_UI;

	/* Datagram frames go straight out of the door as UI */
	ax25_queue_xmit(skb, ax25->ax25_dev->dev);

	err = len;

out:
	release_sock(sk);

	return err;
}

static int
ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
	     int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied;
	int err = 0;

	lock_sock(sk);
	/*
	 * 	This works for seqpacket too. The receiver has ordered the
	 *	queue for us! We do one quick check first though
	 */
	if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
		err =  -ENOTCONN;
		goto out;
	}

	/* Now we can treat all alike */
	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out;

	if (!sk_to_ax25(sk)->pidincl)
		skb_pull(skb, 1);		/* Remove PID */

	skb_reset_transport_header(skb);
	copied = skb->len;

	/* Truncate if the user buffer is smaller than the frame. */
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_msg(skb, 0, msg, copied);

	if (msg->msg_name) {
		ax25_digi digi;
		ax25_address src;
		const unsigned char *mac = skb_mac_header(skb);
		DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name);

		/* Reparse the on-air header to recover the source address
		 * and digipeater path for the caller. */
		memset(sax, 0, sizeof(struct full_sockaddr_ax25));
		ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
				&digi, NULL, NULL);
		sax->sax25_family = AF_AX25;
		/* We set this correctly, even though we may not let the
		   application know the digi calls further down (because it
		   did NOT ask to know them).  This could get political...
**/ sax->sax25_ndigis = digi.ndigi; sax->sax25_call = src; if (sax->sax25_ndigis != 0) { int ct; struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax; for (ct = 0; ct < digi.ndigi; ct++) fsa->fsa_digipeater[ct] = digi.calls[ct]; } msg->msg_namelen = sizeof(struct full_sockaddr_ax25); } skb_free_datagram(sk, skb); err = copied; out: release_sock(sk); return err; } static int ax25_shutdown(struct socket *sk, int how) { /* FIXME - generate DM and RNR states */ return -EOPNOTSUPP; } static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; void __user *argp = (void __user *)arg; int res = 0; lock_sock(sk); switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; res = put_user(amount, (int __user *)argp); break; } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; res = put_user(amount, (int __user *) argp); break; } case SIOCGSTAMP: res = sock_get_timestamp(sk, argp); break; case SIOCGSTAMPNS: res = sock_get_timestampns(sk, argp); break; case SIOCAX25ADDUID: /* Add a uid to the uid/call map table */ case SIOCAX25DELUID: /* Delete a uid from the uid/call map table */ case SIOCAX25GETUID: { struct sockaddr_ax25 sax25; if (copy_from_user(&sax25, argp, sizeof(sax25))) { res = -EFAULT; break; } res = ax25_uid_ioctl(cmd, &sax25); break; } case SIOCAX25NOUID: { /* Set the default policy (default/bar) */ long amount; if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } if (get_user(amount, (long __user *)argp)) { res = -EFAULT; break; } if (amount < 0 || amount > AX25_NOUID_BLOCK) { res = -EINVAL; break; } ax25_uid_policy = amount; res = 0; break; } case SIOCADDRT: case SIOCDELRT: case SIOCAX25OPTRT: if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } res = ax25_rt_ioctl(cmd, argp); break; case 
SIOCAX25CTLCON: if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } res = ax25_ctl_ioctl(cmd, argp); break; case SIOCAX25GETINFO: case SIOCAX25GETINFOOLD: { ax25_cb *ax25 = sk_to_ax25(sk); struct ax25_info_struct ax25_info; ax25_info.t1 = ax25->t1 / HZ; ax25_info.t2 = ax25->t2 / HZ; ax25_info.t3 = ax25->t3 / HZ; ax25_info.idle = ax25->idle / (60 * HZ); ax25_info.n2 = ax25->n2; ax25_info.t1timer = ax25_display_timer(&ax25->t1timer) / HZ; ax25_info.t2timer = ax25_display_timer(&ax25->t2timer) / HZ; ax25_info.t3timer = ax25_display_timer(&ax25->t3timer) / HZ; ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ); ax25_info.n2count = ax25->n2count; ax25_info.state = ax25->state; ax25_info.rcv_q = sk_rmem_alloc_get(sk); ax25_info.snd_q = sk_wmem_alloc_get(sk); ax25_info.vs = ax25->vs; ax25_info.vr = ax25->vr; ax25_info.va = ax25->va; ax25_info.vs_max = ax25->vs; /* reserved */ ax25_info.paclen = ax25->paclen; ax25_info.window = ax25->window; /* old structure? */ if (cmd == SIOCAX25GETINFOOLD) { static int warned = 0; if (!warned) { printk(KERN_INFO "%s uses old SIOCAX25GETINFO\n", current->comm); warned=1; } if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct_deprecated))) { res = -EFAULT; break; } } else { if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct))) { res = -EINVAL; break; } } res = 0; break; } case SIOCAX25ADDFWD: case SIOCAX25DELFWD: { struct ax25_fwd_struct ax25_fwd; if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } if (copy_from_user(&ax25_fwd, argp, sizeof(ax25_fwd))) { res = -EFAULT; break; } res = ax25_fwd_ioctl(cmd, &ax25_fwd); break; } case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: res = -EINVAL; break; default: res = -ENOIOCTLCMD; break; } release_sock(sk); return res; } #ifdef CONFIG_PROC_FS static void *ax25_info_start(struct seq_file *seq, 
loff_t *pos)
	__acquires(ax25_list_lock)
{
	/* /proc/net/ax25 iterator: walk ax25_list under its spinlock,
	 * which is held until ax25_info_stop() releases it. */
	spin_lock_bh(&ax25_list_lock);
	return seq_hlist_start(&ax25_list, *pos);
}

static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &ax25_list, pos);
}

static void ax25_info_stop(struct seq_file *seq, void *v)
	__releases(ax25_list_lock)
{
	spin_unlock_bh(&ax25_list_lock);
}

/* Emit one line of /proc/net/ax25 for the control block at @v. */
static int ax25_info_show(struct seq_file *seq, void *v)
{
	ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
	char buf[11];
	int k;


	/*
	 * New format:
	 * magic dev src_addr dest_addr,digi1,digi2,.. st vs vr va t1 t1 t2 t2 t3 t3 idle idle n2 n2 rtt window paclen Snd-Q Rcv-Q inode
	 */

	seq_printf(seq, "%8.8lx %s %s%s ",
		   (long) ax25,
		   ax25->ax25_dev == NULL? "???" : ax25->ax25_dev->dev->name,
		   ax2asc(buf, &ax25->source_addr),
		   ax25->iamdigi? "*":"");
	seq_printf(seq, "%s", ax2asc(buf, &ax25->dest_addr));

	/* Append the digipeater path, marking repeated hops with '*'. */
	for (k=0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) {
		seq_printf(seq, ",%s%s",
			   ax2asc(buf, &ax25->digipeat->calls[k]),
			   ax25->digipeat->repeated[k]?
"*":"");
	}

	/* State, sequence variables and timer values (jiffies scaled). */
	seq_printf(seq, " %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %d %d",
		   ax25->state,
		   ax25->vs, ax25->vr, ax25->va,
		   ax25_display_timer(&ax25->t1timer) / HZ, ax25->t1 / HZ,
		   ax25_display_timer(&ax25->t2timer) / HZ, ax25->t2 / HZ,
		   ax25_display_timer(&ax25->t3timer) / HZ, ax25->t3 / HZ,
		   ax25_display_timer(&ax25->idletimer) / (60 * HZ),
		   ax25->idle / (60 * HZ),
		   ax25->n2count, ax25->n2,
		   ax25->rtt / HZ,
		   ax25->window,
		   ax25->paclen);

	/* Socket-backed entries also report queue usage and inode. */
	if (ax25->sk != NULL) {
		seq_printf(seq, " %d %d %lu\n",
			   sk_wmem_alloc_get(ax25->sk),
			   sk_rmem_alloc_get(ax25->sk),
			   sock_i_ino(ax25->sk));
	} else {
		seq_puts(seq, " * * *\n");
	}
	return 0;
}

static const struct seq_operations ax25_info_seqops = {
	.start = ax25_info_start,
	.next = ax25_info_next,
	.stop = ax25_info_stop,
	.show = ax25_info_show,
};

static int ax25_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ax25_info_seqops);
}

static const struct file_operations ax25_info_fops = {
	.owner = THIS_MODULE,
	.open = ax25_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif

static const struct net_proto_family ax25_family_ops = {
	.family =	PF_AX25,
	.create =	ax25_create,
	.owner	=	THIS_MODULE,
};

static const struct proto_ops ax25_proto_ops = {
	.family		= PF_AX25,
	.owner		= THIS_MODULE,
	.release	= ax25_release,
	.bind		= ax25_bind,
	.connect	= ax25_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= ax25_accept,
	.getname	= ax25_getname,
	.poll		= datagram_poll,
	.ioctl		= ax25_ioctl,
	.listen		= ax25_listen,
	.shutdown	= ax25_shutdown,
	.setsockopt	= ax25_setsockopt,
	.getsockopt	= ax25_getsockopt,
	.sendmsg	= ax25_sendmsg,
	.recvmsg	= ax25_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

/*
 * Called by socket.c on kernel start up
 */
static struct packet_type ax25_packet_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_AX25),
	.func	= ax25_kiss_rcv,
};

static struct notifier_block ax25_dev_notifier = {
	.notifier_call = ax25_device_event,
};

static int __init ax25_init(void)
{
	int
rc = proto_register(&ax25_proto, 0);

	if (rc != 0)
		goto out;

	/* NOTE(review): sock_register() can fail but its return value is
	 * not checked here — confirm whether the historical behavior is
	 * intentional before changing it. */
	sock_register(&ax25_family_ops);
	dev_add_pack(&ax25_packet_type);
	register_netdevice_notifier(&ax25_dev_notifier);

	/* /proc/net entries: routes, connections and the uid/call map. */
	proc_create("ax25_route", S_IRUGO, init_net.proc_net, &ax25_route_fops);
	proc_create("ax25", S_IRUGO, init_net.proc_net, &ax25_info_fops);
	proc_create("ax25_calls", S_IRUGO, init_net.proc_net, &ax25_uid_fops);
out:
	return rc;
}
module_init(ax25_init);

MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio AX.25 link layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_AX25);

/* Tear down in the reverse order of ax25_init(), then free tables. */
static void __exit ax25_exit(void)
{
	remove_proc_entry("ax25_route", init_net.proc_net);
	remove_proc_entry("ax25", init_net.proc_net);
	remove_proc_entry("ax25_calls", init_net.proc_net);

	unregister_netdevice_notifier(&ax25_dev_notifier);
	dev_remove_pack(&ax25_packet_type);
	sock_unregister(PF_AX25);
	proto_unregister(&ax25_proto);

	ax25_rt_free();
	ax25_uid_free();
	ax25_dev_free();
}
module_exit(ax25_exit);
gpl-2.0
klaudyuxxx/2.6.35.y-P500
sound/soc/s3c24xx/s3c24xx_uda134x.c
938
9680
/*
 * Modifications by Christian Pellegrin <chripell@evolware.org>
 *
 * s3c24xx_uda134x.c -- S3C24XX_UDA134X ALSA SoC Audio board driver
 *
 * Copyright 2007 Dension Audio Systems Ltd.
 * Author: Zoltan Devai
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/gpio.h>

#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/s3c24xx_uda134x.h>
#include <sound/uda134x.h>

#include <plat/regs-iis.h>

#include "s3c-dma.h"
#include "s3c24xx-i2s.h"
#include "../codecs/uda134x.h"

/* #define ENFORCE_RATES 1 */
/*
  Unfortunately the S3C24XX in master mode has a limited capacity of
  generating the clock for the codec. If you define this only rates
  that are really available will be enforced. But be careful, most
  user level application just want the usual sampling frequencies (8,
  11.025, 22.050, 44.1 kHz) and anyway resampling is a costly
  operation for embedded systems. So if you aren't very lucky or your
  hardware engineer wasn't very forward-looking it's better to leave
  this undefined. If you do so an approximate value for the requested
  sampling rate in the range -/+ 5% will be chosen. If this in not
  possible an error will be returned.
*/

static struct clk *xtal;
static struct clk *pclk;
/* this is need because we don't have a place where to keep the
 * pointers to the clocks in each substream.
We get the clocks only * when we are actually using them so we don't block stuff like * frequency change or oscillator power-off */ static int clk_users; static DEFINE_MUTEX(clk_lock); static unsigned int rates[33 * 2]; #ifdef ENFORCE_RATES static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; #endif static struct platform_device *s3c24xx_uda134x_snd_device; static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream) { int ret = 0; #ifdef ENFORCE_RATES struct snd_pcm_runtime *runtime = substream->runtime; #endif mutex_lock(&clk_lock); pr_debug("%s %d\n", __func__, clk_users); if (clk_users == 0) { xtal = clk_get(&s3c24xx_uda134x_snd_device->dev, "xtal"); if (!xtal) { printk(KERN_ERR "%s cannot get xtal\n", __func__); ret = -EBUSY; } else { pclk = clk_get(&s3c24xx_uda134x_snd_device->dev, "pclk"); if (!pclk) { printk(KERN_ERR "%s cannot get pclk\n", __func__); clk_put(xtal); ret = -EBUSY; } } if (!ret) { int i, j; for (i = 0; i < 2; i++) { int fs = i ? 
256 : 384;

				rates[i*33] = clk_get_rate(xtal) / fs;
				for (j = 1; j < 33; j++)
					rates[i*33 + j] = clk_get_rate(pclk) /
						(j * fs);
			}
		}
	}
	/* NOTE(review): clk_users is bumped even when the clock lookup
	 * above failed — confirm the ASoC core pairs every startup() call
	 * with a shutdown(), otherwise the count leaks on error. */
	clk_users += 1;
	mutex_unlock(&clk_lock);
	if (!ret) {
#ifdef ENFORCE_RATES
		ret = snd_pcm_hw_constraint_list(runtime, 0,
						 SNDRV_PCM_HW_PARAM_RATE,
						 &hw_constraints_rates);
		if (ret < 0)
			printk(KERN_ERR "%s cannot set constraints\n",
			       __func__);
#endif
	}
	return ret;
}

/* Drop the clock references once the last active substream closes. */
static void s3c24xx_uda134x_shutdown(struct snd_pcm_substream *substream)
{
	mutex_lock(&clk_lock);
	pr_debug("%s %d\n", __func__, clk_users);
	clk_users -= 1;
	if (clk_users == 0) {
		clk_put(xtal);
		xtal = NULL;
		clk_put(pclk);
		pclk = NULL;
	}
	mutex_unlock(&clk_lock);
}

/* Choose the closest achievable rate and program both DAIs for it. */
static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
					struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
	unsigned int clk = 0;
	int ret = 0;
	int clk_source, fs_mode;
	unsigned long rate = params_rate(params);
	long err, cerr;
	unsigned int div;
	int i, bi;

	/* Linear scan of rates[] for the entry nearest the request. */
	err = 999999;
	bi = 0;
	for (i = 0; i < 2*33; i++) {
		cerr = rates[i] - rate;
		if (cerr < 0)
			cerr = -cerr;
		if (cerr < err) {
			err = cerr;
			bi = i;
		}
	}
	/* Index decodes as: table half = FS mode, offset = pclk divider
	 * (offset 0 means the xtal-driven MPLL source). */
	if (bi / 33 == 1)
		fs_mode = S3C2410_IISMOD_256FS;
	else
		fs_mode = S3C2410_IISMOD_384FS;
	if (bi % 33 == 0) {
		clk_source = S3C24XX_CLKSRC_MPLL;
		div = 1;
	} else {
		clk_source = S3C24XX_CLKSRC_PCLK;
		div = bi % 33;
	}
	pr_debug("%s desired rate %lu, %d\n", __func__, rate, bi);

	clk = (fs_mode == S3C2410_IISMOD_384FS ? 384 : 256) * rate;
	pr_debug("%s will use: %s %s %d sysclk %d err %ld\n", __func__,
		 fs_mode == S3C2410_IISMOD_384FS ? "384FS" : "256FS",
		 clk_source == S3C24XX_CLKSRC_MPLL ?
"MPLLin" : "PCLK", div, clk, err);

	/* Refuse if the best reachable rate is off by more than 5%. */
	if ((err * 100 / rate) > 5) {
		printk(KERN_ERR "S3C24XX_UDA134X: effective frequency "
		       "too different from desired (%ld%%)\n",
		       err * 100 / rate);
		return -EINVAL;
	}

	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_sysclk(cpu_dai, clk_source , clk,
			SND_SOC_CLOCK_IN);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK, fs_mode);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
			S3C2410_IISMOD_32FS);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
			S3C24XX_PRESCALE(div, div));
	if (ret < 0)
		return ret;

	/* set the codec system clock for DAC and ADC */
	ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk,
			SND_SOC_CLOCK_OUT);
	if (ret < 0)
		return ret;

	return 0;
}

static struct snd_soc_ops s3c24xx_uda134x_ops = {
	.startup = s3c24xx_uda134x_startup,
	.shutdown = s3c24xx_uda134x_shutdown,
	.hw_params = s3c24xx_uda134x_hw_params,
};

static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = {
	.name = "UDA134X",
	.stream_name = "UDA134X",
	.codec_dai = &uda134x_dai,
	.cpu_dai = &s3c24xx_i2s_dai,
	.ops = &s3c24xx_uda134x_ops,
};

static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
	.name = "S3C24XX_UDA134X",
	.platform = &s3c24xx_soc_platform,
	.dai_link = &s3c24xx_uda134x_dai_link,
	.num_links = 1,
};

static struct s3c24xx_uda134x_platform_data *s3c24xx_uda134x_l3_pins;

/* L3-bus bit-bang callbacks: drive the GPIO lines named in the
 * platform data. */
static void setdat(int v)
{
	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_data, v > 0);
}

static void setclk(int v)
{
	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_clk, v > 0);
}

static void setmode(int v)
{
	gpio_set_value(s3c24xx_uda134x_l3_pins->l3_mode, v > 0);
}

static struct uda134x_platform_data s3c24xx_uda134x = {
	.l3 = {
		.setdat =
setmode,
		.data_hold = 1,
		.data_setup = 1,
		.clock_high = 1,
		.mode_hold = 1,
		.mode = 1,
		.mode_setup = 1,
	},
};

static struct snd_soc_device s3c24xx_uda134x_snd_devdata = {
	.card = &snd_soc_s3c24xx_uda134x,
	.codec_dev = &soc_codec_dev_uda134x,
	.codec_data = &s3c24xx_uda134x,
};

/* Claim one L3 GPIO and drive it low; -EBUSY if it is already taken. */
static int s3c24xx_uda134x_setup_pin(int pin, char *fun)
{
	if (gpio_request(pin, "s3c24xx_uda134x") < 0) {
		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
		       "l3 %s pin already in use", fun);
		return -EBUSY;
	}
	gpio_direction_output(pin, 0);
	return 0;
}

/* Bind the board driver: grab the three L3 GPIOs from platform data
 * and register the "soc-audio" device that carries the card. */
static int s3c24xx_uda134x_probe(struct platform_device *pdev)
{
	int ret;

	printk(KERN_INFO "S3C24XX_UDA134X SoC Audio driver\n");

	s3c24xx_uda134x_l3_pins = pdev->dev.platform_data;
	if (s3c24xx_uda134x_l3_pins == NULL) {
		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
		       "unable to find platform data\n");
		return -ENODEV;
	}
	s3c24xx_uda134x.power = s3c24xx_uda134x_l3_pins->power;
	s3c24xx_uda134x.model = s3c24xx_uda134x_l3_pins->model;

	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_data,
				      "data") < 0)
		return -EBUSY;
	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_clk,
				      "clk") < 0) {
		gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
		return -EBUSY;
	}
	if (s3c24xx_uda134x_setup_pin(s3c24xx_uda134x_l3_pins->l3_mode,
				      "mode") < 0) {
		gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
		gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
		return -EBUSY;
	}

	s3c24xx_uda134x_snd_device = platform_device_alloc("soc-audio", -1);
	if (!s3c24xx_uda134x_snd_device) {
		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: "
		       "Unable to register\n");
		return -ENOMEM;
	}

	platform_set_drvdata(s3c24xx_uda134x_snd_device,
			     &s3c24xx_uda134x_snd_devdata);
	s3c24xx_uda134x_snd_devdata.dev = &s3c24xx_uda134x_snd_device->dev;
	/* NOTE(review): on platform_device_add() failure the three GPIOs
	 * requested above are not freed — confirm whether that leak is
	 * acceptable here. */
	ret = platform_device_add(s3c24xx_uda134x_snd_device);
	if (ret) {
		printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");
		platform_device_put(s3c24xx_uda134x_snd_device);
	}

	return ret;
}

static int s3c24xx_uda134x_remove(struct platform_device *pdev)
{
platform_device_unregister(s3c24xx_uda134x_snd_device);
	/* Undo probe(): the card device is gone, release the L3 GPIOs. */
	gpio_free(s3c24xx_uda134x_l3_pins->l3_data);
	gpio_free(s3c24xx_uda134x_l3_pins->l3_clk);
	gpio_free(s3c24xx_uda134x_l3_pins->l3_mode);
	return 0;
}

static struct platform_driver s3c24xx_uda134x_driver = {
	.probe  = s3c24xx_uda134x_probe,
	.remove = s3c24xx_uda134x_remove,
	.driver = {
		.name = "s3c24xx_uda134x",
		.owner = THIS_MODULE,
	},
};

static int __init s3c24xx_uda134x_init(void)
{
	return platform_driver_register(&s3c24xx_uda134x_driver);
}

static void __exit s3c24xx_uda134x_exit(void)
{
	platform_driver_unregister(&s3c24xx_uda134x_driver);
}

module_init(s3c24xx_uda134x_init);
module_exit(s3c24xx_uda134x_exit);

MODULE_AUTHOR("Zoltan Devai, Christian Pellegrin <chripell@evolware.org>");
MODULE_DESCRIPTION("S3C24XX_UDA134X ALSA SoC audio driver");
MODULE_LICENSE("GPL");
gpl-2.0
drhonk/SGH-T959V-GB
drivers/scsi/libiscsi.c
938
92953
/* * iSCSI lib functions * * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2004 - 2006 Mike Christie * Copyright (C) 2004 - 2005 Dmitry Yusupov * Copyright (C) 2004 - 2005 Alex Aizman * maintained by open-iscsi@googlegroups.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/types.h> #include <linux/kfifo.h> #include <linux/delay.h> #include <linux/log2.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/iscsi_proto.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_iscsi.h> #include <scsi/libiscsi.h> static int iscsi_dbg_lib_conn; module_param_named(debug_libiscsi_conn, iscsi_dbg_lib_conn, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_libiscsi_conn, "Turn on debugging for connections in libiscsi module. " "Set to 1 to turn on, and zero to turn off. Default is off."); static int iscsi_dbg_lib_session; module_param_named(debug_libiscsi_session, iscsi_dbg_lib_session, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_libiscsi_session, "Turn on debugging for sessions in libiscsi module. " "Set to 1 to turn on, and zero to turn off. 
Default is off."); static int iscsi_dbg_lib_eh; module_param_named(debug_libiscsi_eh, iscsi_dbg_lib_eh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug_libiscsi_eh, "Turn on debugging for error handling in libiscsi module. " "Set to 1 to turn on, and zero to turn off. Default is off."); #define ISCSI_DBG_CONN(_conn, dbg_fmt, arg...) \ do { \ if (iscsi_dbg_lib_conn) \ iscsi_conn_printk(KERN_INFO, _conn, \ "%s " dbg_fmt, \ __func__, ##arg); \ } while (0); #define ISCSI_DBG_SESSION(_session, dbg_fmt, arg...) \ do { \ if (iscsi_dbg_lib_session) \ iscsi_session_printk(KERN_INFO, _session, \ "%s " dbg_fmt, \ __func__, ##arg); \ } while (0); #define ISCSI_DBG_EH(_session, dbg_fmt, arg...) \ do { \ if (iscsi_dbg_lib_eh) \ iscsi_session_printk(KERN_INFO, _session, \ "%s " dbg_fmt, \ __func__, ##arg); \ } while (0); /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ #define SNA32_CHECK 2147483648UL static int iscsi_sna_lt(u32 n1, u32 n2) { return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) || (n1 > n2 && (n2 - n1 < SNA32_CHECK))); } /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */ static int iscsi_sna_lte(u32 n1, u32 n2) { return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) || (n1 > n2 && (n2 - n1 < SNA32_CHECK))); } inline void iscsi_conn_queue_work(struct iscsi_conn *conn) { struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); if (ihost->workq) queue_work(ihost->workq, &conn->xmitwork); } EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); static void __iscsi_update_cmdsn(struct iscsi_session *session, uint32_t exp_cmdsn, uint32_t max_cmdsn) { /* * standard specifies this check for when to update expected and * max sequence numbers */ if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1)) return; if (exp_cmdsn != session->exp_cmdsn && !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn)) session->exp_cmdsn = exp_cmdsn; if (max_cmdsn != session->max_cmdsn && !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) { session->max_cmdsn = 
max_cmdsn; /* * if the window closed with IO queued, then kick the * xmit thread */ if (!list_empty(&session->leadconn->cmdqueue) || !list_empty(&session->leadconn->mgmtqueue)) iscsi_conn_queue_work(session->leadconn); } } void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) { __iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn), be32_to_cpu(hdr->max_cmdsn)); } EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); /** * iscsi_prep_data_out_pdu - initialize Data-Out * @task: scsi command task * @r2t: R2T info * @hdr: iscsi data in pdu * * Notes: * Initialize Data-Out within this R2T sequence and finds * proper data_offset within this SCSI command. * * This function is called with connection lock taken. **/ void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t, struct iscsi_data *hdr) { struct iscsi_conn *conn = task->conn; unsigned int left = r2t->data_length - r2t->sent; task->hdr_len = sizeof(struct iscsi_data); memset(hdr, 0, sizeof(struct iscsi_data)); hdr->ttt = r2t->ttt; hdr->datasn = cpu_to_be32(r2t->datasn); r2t->datasn++; hdr->opcode = ISCSI_OP_SCSI_DATA_OUT; memcpy(hdr->lun, task->lun, sizeof(hdr->lun)); hdr->itt = task->hdr_itt; hdr->exp_statsn = r2t->exp_statsn; hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent); if (left > conn->max_xmit_dlength) { hton24(hdr->dlength, conn->max_xmit_dlength); r2t->data_count = conn->max_xmit_dlength; hdr->flags = 0; } else { hton24(hdr->dlength, left); r2t->data_count = left; hdr->flags = ISCSI_FLAG_CMD_FINAL; } conn->dataout_pdus_cnt++; } EXPORT_SYMBOL_GPL(iscsi_prep_data_out_pdu); static int iscsi_add_hdr(struct iscsi_task *task, unsigned len) { unsigned exp_len = task->hdr_len + len; if (exp_len > task->hdr_max) { WARN_ON(1); return -EINVAL; } WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */ task->hdr_len = exp_len; return 0; } /* * make an extended cdb AHS */ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) { struct scsi_cmnd *cmd = task->sc; 
unsigned rlen, pad_len; unsigned short ahslength; struct iscsi_ecdb_ahdr *ecdb_ahdr; int rc; ecdb_ahdr = iscsi_next_hdr(task); rlen = cmd->cmd_len - ISCSI_CDB_SIZE; BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb)); ahslength = rlen + sizeof(ecdb_ahdr->reserved); pad_len = iscsi_padding(rlen); rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) + sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len); if (rc) return rc; if (pad_len) memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len); ecdb_ahdr->ahslength = cpu_to_be16(ahslength); ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB; ecdb_ahdr->reserved = 0; memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen); ISCSI_DBG_SESSION(task->conn->session, "iscsi_prep_ecdb_ahs: varlen_cdb_len %d " "rlen %d pad_len %d ahs_length %d iscsi_headers_size " "%u\n", cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len); return 0; } static int iscsi_prep_bidi_ahs(struct iscsi_task *task) { struct scsi_cmnd *sc = task->sc; struct iscsi_rlength_ahdr *rlen_ahdr; int rc; rlen_ahdr = iscsi_next_hdr(task); rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr)); if (rc) return rc; rlen_ahdr->ahslength = cpu_to_be16(sizeof(rlen_ahdr->read_length) + sizeof(rlen_ahdr->reserved)); rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH; rlen_ahdr->reserved = 0; rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length); ISCSI_DBG_SESSION(task->conn->session, "bidi-in rlen_ahdr->read_length(%d) " "rlen_ahdr->ahslength(%d)\n", be32_to_cpu(rlen_ahdr->read_length), be16_to_cpu(rlen_ahdr->ahslength)); return 0; } /** * iscsi_check_tmf_restrictions - check if a task is affected by TMF * @task: iscsi task * @opcode: opcode to check for * * During TMF a task has to be checked if it's affected. * All unrelated I/O can be passed through, but I/O to the * affected LUN should be restricted. * If 'fast_abort' is set we won't be sending any I/O to the * affected LUN. * Otherwise the target is waiting for all TTTs to be completed, * so we have to send all outstanding Data-Out PDUs to the target. 
*/ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode) { struct iscsi_conn *conn = task->conn; struct iscsi_tm *tmf = &conn->tmhdr; unsigned int hdr_lun; if (conn->tmf_state == TMF_INITIAL) return 0; if ((tmf->opcode & ISCSI_OPCODE_MASK) != ISCSI_OP_SCSI_TMFUNC) return 0; switch (ISCSI_TM_FUNC_VALUE(tmf)) { case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET: /* * Allow PDUs for unrelated LUNs */ hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun); if (hdr_lun != task->sc->device->lun) return 0; /* fall through */ case ISCSI_TM_FUNC_TARGET_WARM_RESET: /* * Fail all SCSI cmd PDUs */ if (opcode != ISCSI_OP_SCSI_DATA_OUT) { iscsi_conn_printk(KERN_INFO, conn, "task [op %x/%x itt " "0x%x/0x%x] " "rejected.\n", task->hdr->opcode, opcode, task->itt, task->hdr_itt); return -EACCES; } /* * And also all data-out PDUs in response to R2T * if fast_abort is set. */ if (conn->session->fast_abort) { iscsi_conn_printk(KERN_INFO, conn, "task [op %x/%x itt " "0x%x/0x%x] fast abort.\n", task->hdr->opcode, opcode, task->itt, task->hdr_itt); return -EACCES; } break; case ISCSI_TM_FUNC_ABORT_TASK: /* * the caller has already checked if the task * they want to abort was in the pending queue so if * we are here the cmd pdu has gone out already, and * we will only hit this for data-outs */ if (opcode == ISCSI_OP_SCSI_DATA_OUT && task->hdr_itt == tmf->rtt) { ISCSI_DBG_SESSION(conn->session, "Preventing task %x/%x from sending " "data-out due to abort task in " "progress\n", task->itt, task->hdr_itt); return -EACCES; } break; } return 0; } /** * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu * @task: iscsi task * * Prep basic iSCSI PDU fields for a scsi cmd pdu. 
The LLD should set * fields like dlength or final based on how much data it sends */ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; struct iscsi_cmd *hdr; unsigned hdrlength, cmd_len; itt_t itt; int rc; rc = iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_CMD); if (rc) return rc; if (conn->session->tt->alloc_pdu) { rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); if (rc) return rc; } hdr = (struct iscsi_cmd *) task->hdr; itt = hdr->itt; memset(hdr, 0, sizeof(*hdr)); if (session->tt->parse_pdu_itt) hdr->itt = task->hdr_itt = itt; else hdr->itt = task->hdr_itt = build_itt(task->itt, task->conn->session->age); task->hdr_len = 0; rc = iscsi_add_hdr(task, sizeof(*hdr)); if (rc) return rc; hdr->opcode = ISCSI_OP_SCSI_CMD; hdr->flags = ISCSI_ATTR_SIMPLE; int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); memcpy(task->lun, hdr->lun, sizeof(task->lun)); hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); cmd_len = sc->cmd_len; if (cmd_len < ISCSI_CDB_SIZE) memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len); else if (cmd_len > ISCSI_CDB_SIZE) { rc = iscsi_prep_ecdb_ahs(task); if (rc) return rc; cmd_len = ISCSI_CDB_SIZE; } memcpy(hdr->cdb, sc->cmnd, cmd_len); task->imm_count = 0; if (scsi_bidi_cmnd(sc)) { hdr->flags |= ISCSI_FLAG_CMD_READ; rc = iscsi_prep_bidi_ahs(task); if (rc) return rc; } if (sc->sc_data_direction == DMA_TO_DEVICE) { unsigned out_len = scsi_out(sc)->length; struct iscsi_r2t_info *r2t = &task->unsol_r2t; hdr->data_length = cpu_to_be32(out_len); hdr->flags |= ISCSI_FLAG_CMD_WRITE; /* * Write counters: * * imm_count bytes to be sent right after * SCSI PDU Header * * unsol_count bytes(as Data-Out) to be sent * without R2T ack right after * immediate data * * r2t data_length bytes to be sent via R2T ack's * * pad_count bytes to be sent as zero-padding */ memset(r2t, 0, sizeof(*r2t)); if 
(session->imm_data_en) { if (out_len >= session->first_burst) task->imm_count = min(session->first_burst, conn->max_xmit_dlength); else task->imm_count = min(out_len, conn->max_xmit_dlength); hton24(hdr->dlength, task->imm_count); } else zero_data(hdr->dlength); if (!session->initial_r2t_en) { r2t->data_length = min(session->first_burst, out_len) - task->imm_count; r2t->data_offset = task->imm_count; r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); r2t->exp_statsn = cpu_to_be32(conn->exp_statsn); } if (!task->unsol_r2t.data_length) /* No unsolicit Data-Out's */ hdr->flags |= ISCSI_FLAG_CMD_FINAL; } else { hdr->flags |= ISCSI_FLAG_CMD_FINAL; zero_data(hdr->dlength); hdr->data_length = cpu_to_be32(scsi_in(sc)->length); if (sc->sc_data_direction == DMA_FROM_DEVICE) hdr->flags |= ISCSI_FLAG_CMD_READ; } /* calculate size of additional header segments (AHSs) */ hdrlength = task->hdr_len - sizeof(*hdr); WARN_ON(hdrlength & (ISCSI_PAD_LEN-1)); hdrlength /= ISCSI_PAD_LEN; WARN_ON(hdrlength >= 256); hdr->hlength = hdrlength & 0xFF; hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn); if (session->tt->init_task && session->tt->init_task(task)) return -EIO; task->state = ISCSI_TASK_RUNNING; session->cmdsn++; conn->scsicmd_pdus_cnt++; ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " "itt 0x%x len %d bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ? "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", conn->id, sc, sc->cmnd[0], task->itt, scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); return 0; } /** * iscsi_free_task - free a task * @task: iscsi cmd task * * Must be called with session lock. * This function returns the scsi command to scsi-ml or cleans * up mgmt tasks then returns the task to the pool. 
*/ static void iscsi_free_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", task->itt, task->state, task->sc); session->tt->cleanup_task(task); task->state = ISCSI_TASK_FREE; task->sc = NULL; /* * login task is preallocated so do not free */ if (conn->login_task == task) return; kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); if (sc) { task->sc = NULL; /* SCSI eh reuses commands to verify us */ sc->SCp.ptr = NULL; /* * queue command may call this to free the task, but * not have setup the sc callback */ if (sc->scsi_done) sc->scsi_done(sc); } } void __iscsi_get_task(struct iscsi_task *task) { atomic_inc(&task->refcount); } EXPORT_SYMBOL_GPL(__iscsi_get_task); static void __iscsi_put_task(struct iscsi_task *task) { if (atomic_dec_and_test(&task->refcount)) iscsi_free_task(task); } void iscsi_put_task(struct iscsi_task *task) { struct iscsi_session *session = task->conn->session; spin_lock_bh(&session->lock); __iscsi_put_task(task); spin_unlock_bh(&session->lock); } EXPORT_SYMBOL_GPL(iscsi_put_task); /** * iscsi_complete_task - finish a task * @task: iscsi cmd task * @state: state to complete task with * * Must be called with session lock. 
*/ static void iscsi_complete_task(struct iscsi_task *task, int state) { struct iscsi_conn *conn = task->conn; ISCSI_DBG_SESSION(conn->session, "complete task itt 0x%x state %d sc %p\n", task->itt, task->state, task->sc); if (task->state == ISCSI_TASK_COMPLETED || task->state == ISCSI_TASK_ABRT_TMF || task->state == ISCSI_TASK_ABRT_SESS_RECOV) return; WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); task->state = state; if (!list_empty(&task->running)) list_del_init(&task->running); if (conn->task == task) conn->task = NULL; if (conn->ping_task == task) conn->ping_task = NULL; /* release get from queueing */ __iscsi_put_task(task); } /** * iscsi_complete_scsi_task - finish scsi task normally * @task: iscsi task for scsi cmd * @exp_cmdsn: expected cmd sn in cpu format * @max_cmdsn: max cmd sn in cpu format * * This is used when drivers do not need or cannot perform * lower level pdu processing. * * Called with session lock */ void iscsi_complete_scsi_task(struct iscsi_task *task, uint32_t exp_cmdsn, uint32_t max_cmdsn) { struct iscsi_conn *conn = task->conn; ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt); conn->last_recv = jiffies; __iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn); iscsi_complete_task(task, ISCSI_TASK_COMPLETED); } EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task); /* * session lock must be held and if not called for a task that is * still pending or from the xmit thread, then xmit thread must * be suspended. */ static void fail_scsi_task(struct iscsi_task *task, int err) { struct iscsi_conn *conn = task->conn; struct scsi_cmnd *sc; int state; /* * if a command completes and we get a successful tmf response * we will hit this because the scsi eh abort code does not take * a ref to the task. 
*/
	/* (fail_scsi_task continued — entered with the session lock held) */
	sc = task->sc;
	if (!sc)
		return;

	if (task->state == ISCSI_TASK_PENDING) {
		/*
		 * cmd never made it to the xmit thread, so we should not count
		 * the cmd in the sequencing
		 */
		conn->session->queued_cmdsn--;
		/* it was never sent so just complete like normal */
		state = ISCSI_TASK_COMPLETED;
	} else if (err == DID_TRANSPORT_DISRUPTED)
		state = ISCSI_TASK_ABRT_SESS_RECOV;
	else
		state = ISCSI_TASK_ABRT_TMF;

	/* host-byte error goes in the top 16 bits of scsi_cmnd->result */
	sc->result = err << 16;
	if (!scsi_bidi_cmnd(sc))
		scsi_set_resid(sc, scsi_bufflen(sc));
	else {
		/* bidi command: report both directions as fully residual */
		scsi_out(sc)->resid = scsi_out(sc)->length;
		scsi_in(sc)->resid = scsi_in(sc)->length;
	}
	iscsi_complete_task(task, state);
}

/*
 * iscsi_prep_mgmt_task - finish setup of a queued mgmt PDU (nop/tmf/
 * login/text/logout) just before transmission: stamps ExpStatSN and
 * CmdSN, bumps the session CmdSN window for non-immediate PDUs, and
 * runs the transport's init_task callout.  Called with the session
 * lock held.  Returns 0, -ENOTCONN if logging out, or -EIO on
 * init_task failure.
 */
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
				struct iscsi_task *task)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_hdr *hdr = task->hdr;
	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
		return -ENOTCONN;

	/* login/text ExpStatSN is managed by userspace (iscsid) */
	if (opcode != ISCSI_OP_LOGIN && opcode != ISCSI_OP_TEXT)
		nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
	/*
	 * pre-format CmdSN for outgoing PDU.
	 */
	nop->cmdsn = cpu_to_be32(session->cmdsn);
	if (hdr->itt != RESERVED_ITT) {
		/*
		 * TODO: We always use immediate for normal session pdus.
		 * If we start to send tmfs or nops as non-immediate then
		 * we should start checking the cmdsn numbers for mgmt tasks.
		 *
		 * During discovery sessions iscsid sends TEXT as non immediate,
		 * but we always only send one PDU at a time.
		 */
		if (conn->c_stage == ISCSI_CONN_STARTED &&
		    !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
			session->queued_cmdsn++;
			session->cmdsn++;
		}
	}

	if (session->tt->init_task && session->tt->init_task(task))
		return -EIO;

	if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
		session->state = ISCSI_STATE_LOGGING_OUT;

	task->state = ISCSI_TASK_RUNNING;
	ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
			  "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
			  hdr->itt, task->data_count);
	return 0;
}

/*
 * __iscsi_conn_send_pdu - allocate a mgmt task for @hdr (+optional
 * immediate data), then either transmit it directly (no workqueue,
 * i.e. offload) or queue it on conn->mgmtqueue for the xmit worker.
 * Login/Text reuse the preallocated conn->login_task.  Called with
 * the session lock held.  Returns the task or NULL on failure.
 */
static struct iscsi_task *
__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		      char *data, uint32_t data_size)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_host *ihost = shost_priv(session->host);
	uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	struct iscsi_task *task;
	itt_t itt;

	if (session->state == ISCSI_STATE_TERMINATE)
		return NULL;

	if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
		/*
		 * Login and Text are sent serially, in
		 * request-followed-by-response sequence.
		 * Same task can be used. Same ITT must be used.
		 * Note that login_task is preallocated at conn_create().
		 */
		if (conn->login_task->state != ISCSI_TASK_FREE) {
			iscsi_conn_printk(KERN_ERR, conn, "Login/Text in "
					  "progress. Cannot start new task.\n");
			return NULL;
		}

		task = conn->login_task;
	} else {
		if (session->state != ISCSI_STATE_LOGGED_IN)
			return NULL;

		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);

		if (!kfifo_out(&session->cmdpool.queue,
			       (void*)&task, sizeof(void*)))
			return NULL;
	}
	/*
	 * released in complete pdu for task we expect a response for, and
	 * released by the lld when it has transmitted the task for
	 * pdus we do not expect a response for.
	 */
	atomic_set(&task->refcount, 1);
	task->conn = conn;
	task->sc = NULL;
	INIT_LIST_HEAD(&task->running);
	task->state = ISCSI_TASK_PENDING;

	if (data_size) {
		memcpy(task->data, data, data_size);
		task->data_count = data_size;
	} else
		task->data_count = 0;

	if (conn->session->tt->alloc_pdu) {
		if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
			iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
					  "pdu for mgmt task.\n");
			goto free_task;
		}
	}

	/* save the ITT the transport's alloc_pdu may have set, then copy
	 * in the caller's header over it */
	itt = task->hdr->itt;
	task->hdr_len = sizeof(struct iscsi_hdr);
	memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));

	if (hdr->itt != RESERVED_ITT) {
		if (session->tt->parse_pdu_itt)
			task->hdr->itt = itt;
		else
			task->hdr->itt =
				build_itt(task->itt, task->conn->session->age);
	}

	if (!ihost->workq) {
		/* no xmit workqueue: prep and send inline (offload path) */
		if (iscsi_prep_mgmt_task(conn, task))
			goto free_task;

		if (session->tt->xmit_task(task))
			goto free_task;
	} else {
		list_add_tail(&task->running, &conn->mgmtqueue);
		iscsi_conn_queue_work(conn);
	}

	return task;

free_task:
	__iscsi_put_task(task);
	return NULL;
}

/*
 * iscsi_conn_send_pdu - exported wrapper: send a generic PDU under the
 * session lock.  Returns 0 on success, -EPERM if the PDU could not be
 * allocated/queued.
 */
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
			char *data, uint32_t data_size)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	int err = 0;

	spin_lock_bh(&session->lock);
	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
		err = -EPERM;
	spin_unlock_bh(&session->lock);
	return err;
}
EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);

/**
 * iscsi_cmd_rsp - SCSI Command Response processing
 * @conn: iscsi connection
 * @hdr: iscsi header
 * @task: scsi command task
 * @data: cmd data buffer
 * @datalen: len of buffer
 *
 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
 * then completes the command and task.
**/
static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       struct iscsi_task *task, char *data,
			       int datalen)
{
	struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
	struct iscsi_session *session = conn->session;
	struct scsi_cmnd *sc = task->sc;

	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;

	sc->result = (DID_OK << 16) | rhdr->cmd_status;

	if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
		sc->result = DID_ERROR << 16;
		goto out;
	}

	if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
		uint16_t senselen;

		/* sense data is prefixed by a 2-byte big-endian length */
		if (datalen < 2) {
invalid_datalen:
			iscsi_conn_printk(KERN_ERR, conn,
					 "Got CHECK_CONDITION but invalid data "
					 "buffer size of %d\n", datalen);
			sc->result = DID_BAD_TARGET << 16;
			goto out;
		}

		senselen = get_unaligned_be16(data);
		if (datalen < senselen)
			goto invalid_datalen;

		memcpy(sc->sense_buffer, data + 2,
		       min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
		ISCSI_DBG_SESSION(session, "copied %d bytes of sense\n",
				  min_t(uint16_t, senselen,
				  SCSI_SENSE_BUFFERSIZE));
	}

	if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
			   ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->bi_residual_count);

		if (scsi_bidi_cmnd(sc) && res_count > 0 &&
				(rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
				 res_count <= scsi_in(sc)->length))
			scsi_in(sc)->resid = res_count;
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

	if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
			   ISCSI_FLAG_CMD_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= scsi_bufflen(sc)))
			/* write side for bidi or uni-io set_resid */
			scsi_set_resid(sc, res_count);
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}
out:
	ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
			  sc, sc->result, task->itt);
	conn->scsirsp_pdus_cnt++;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}

/**
 * iscsi_data_in_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @hdr:  iscsi pdu
 * @task: scsi command task
 **/
static void
iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		  struct iscsi_task *task)
{
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)hdr;
	struct scsi_cmnd *sc = task->sc;

	/* only a Data-In carrying phase-collapsed status completes the cmd */
	if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
		return;

	iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
	sc->result = (DID_OK << 16) | rhdr->cmd_status;
	conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
	if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
	                   ISCSI_FLAG_DATA_OVERFLOW)) {
		int res_count = be32_to_cpu(rhdr->residual_count);

		/*
		 * NOTE(review): tests ISCSI_FLAG_CMD_OVERFLOW here rather
		 * than ISCSI_FLAG_DATA_OVERFLOW — presumably the two flags
		 * share the same bit value; confirm against the iscsi_proto
		 * header.
		 */
		if (res_count > 0 &&
		    (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
		     res_count <= scsi_in(sc)->length))
			scsi_in(sc)->resid = res_count;
		else
			sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
	}

	ISCSI_DBG_SESSION(conn->session, "data in with status done "
			  "[sc %p res %d itt 0x%x]\n",
			  sc, sc->result, task->itt);
	conn->scsirsp_pdus_cnt++;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
}

/*
 * iscsi_tmf_rsp - record the result of an outstanding task management
 * function and wake the error-handler thread sleeping on conn->ehwait.
 * Called with the session lock held.
 */
static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
{
	struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;

	conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
	conn->tmfrsp_pdus_cnt++;

	/* a late/duplicate response after timeout is simply dropped */
	if (conn->tmf_state != TMF_QUEUED)
		return;

	if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
		conn->tmf_state = TMF_SUCCESS;
	else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
		conn->tmf_state = TMF_NOT_FOUND;
	else
		conn->tmf_state = TMF_FAILED;
	wake_up(&conn->ehwait);
}

/*
 * iscsi_send_nopout - send a NOP-Out, either as a reply to a target
 * NOP-In (@rhdr != NULL) or as our own ping (@rhdr == NULL, tracked in
 * conn->ping_task).  Only one outstanding self-ping is allowed.
 * Called with the session lock held.
 */
static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
{
        struct iscsi_nopout hdr;
	struct iscsi_task *task;

	if (!rhdr && conn->ping_task)
		return;

	memset(&hdr, 0, sizeof(struct iscsi_nopout));
	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
	hdr.flags = ISCSI_FLAG_CMD_FINAL;

	if (rhdr) {
		/* echo back the target's LUN and TTT */
		memcpy(hdr.lun, rhdr->lun, 8);
		hdr.ttt = rhdr->ttt;
		hdr.itt = RESERVED_ITT;
	} else
		hdr.ttt = RESERVED_ITT;

	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
	if (!task)
		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
	else if (!rhdr) {
		/* only track our nops */
		conn->ping_task = task;
		conn->last_ping = jiffies;
	}
}

/*
 * iscsi_nop_out_rsp - handle the NOP-In answering a NOP-Out.  Our own
 * ping refreshes the transport timer; anything else is forwarded to
 * userspace via iscsi_recv_pdu.  Returns 0 or ISCSI_ERR_CONN_FAILED.
 */
static int iscsi_nop_out_rsp(struct iscsi_task *task,
			     struct iscsi_nopin *nop, char *data, int datalen)
{
	struct iscsi_conn *conn = task->conn;
	int rc = 0;

	if (conn->ping_task != task) {
		/*
		 * If this is not in response to one of our
		 * nops then it must be from userspace.
		 */
		if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
				   data, datalen))
			rc = ISCSI_ERR_CONN_FAILED;
	} else
		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
	return rc;
}

/*
 * iscsi_handle_reject - process a Reject PDU.  The rejected PDU's BHS
 * is carried in @data; for immediate-command rejects of our nop-outs we
 * either resend (reply-nop) or tear down the tracked ping task.
 * Returns 0 or an ISCSI_ERR_* code.
 */
static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			       char *data, int datalen)
{
	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
	struct iscsi_hdr rejected_pdu;
	int opcode, rc = 0;

	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;

	if (ntoh24(reject->dlength) > datalen ||
	    ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
		iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
				  "pdu. Invalid data length (pdu dlength "
				  "%u, datalen %d\n", ntoh24(reject->dlength),
				  datalen);
		return ISCSI_ERR_PROTO;
	}

	memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
	opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;

	/*
	 * NOTE(review): the printks below pass (itt, opcode) but the format
	 * string is "op 0x%x itt 0x%x" — arguments look swapped relative to
	 * the format (and itt is still wire byte order); confirm and fix in
	 * a separate change.
	 */
	switch (reject->reason) {
	case ISCSI_REASON_DATA_DIGEST_ERROR:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected "
				  "due to DataDigest error.\n",
				  rejected_pdu.itt, opcode);
		break;
	case ISCSI_REASON_IMM_CMD_REJECT:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
				  "immediate commands.\n",
				  rejected_pdu.itt, opcode);
		/*
		 * We only send one TMF at a time so if the target could not
		 * handle it, then it should get fixed (RFC mandates that
		 * a target can handle one immediate TMF per conn).
		 *
		 * For nops-outs, we could have sent more than one if
		 * the target is sending us lots of nop-ins
		 */
		if (opcode != ISCSI_OP_NOOP_OUT)
			return 0;

		if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
			/*
			 * nop-out in response to target's nop-out rejected.
			 * Just resend.
			 */
			iscsi_send_nopout(conn,
					  (struct iscsi_nopin*)&rejected_pdu);
		else {
			struct iscsi_task *task;
			/*
			 * Our nop as ping got dropped. We know the target
			 * and transport are ok so just clean up
			 */
			task = iscsi_itt_to_task(conn, rejected_pdu.itt);
			if (!task) {
				iscsi_conn_printk(KERN_ERR, conn,
						 "Invalid pdu reject. Could "
						 "not lookup rejected task.\n");
				rc = ISCSI_ERR_BAD_ITT;
			} else
				rc = iscsi_nop_out_rsp(task,
					(struct iscsi_nopin*)&rejected_pdu,
					NULL, 0);
		}
		break;
	default:
		iscsi_conn_printk(KERN_ERR, conn,
				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
				  "code 0x%x\n", rejected_pdu.itt,
				  rejected_pdu.opcode, reject->reason);
		break;
	}
	return rc;
}

/**
 * iscsi_itt_to_task - look up task by itt
 * @conn: iscsi connection
 * @itt: itt
 *
 * This should be used for mgmt tasks like login and nops, or if
 * the LDD's itt space does not include the session age.
 *
 * The session lock must be held.
 */
struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_session *session = conn->session;
	int i;

	if (itt == RESERVED_ITT)
		return NULL;

	if (session->tt->parse_pdu_itt)
		session->tt->parse_pdu_itt(conn, itt, &i, NULL);
	else
		i = get_itt(itt);
	if (i >= session->cmds_max)
		return NULL;

	return session->cmds[i];
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_task);

/**
 * __iscsi_complete_pdu - complete pdu
 * @conn: iscsi conn
 * @hdr: iscsi header
 * @data: data buffer
 * @datalen: len of data buffer
 *
 * Completes pdu processing by freeing any resources allocated at
 * queuecommand or send generic. session lock must be held and verify
 * itt must have been called.
 */
int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			 char *data, int datalen)
{
	struct iscsi_session *session = conn->session;
	int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
	struct iscsi_task *task;
	uint32_t itt;

	conn->last_recv = jiffies;
	rc = iscsi_verify_itt(conn, hdr->itt);
	if (rc)
		return rc;

	if (hdr->itt != RESERVED_ITT)
		itt = get_itt(hdr->itt);
	else
		itt = ~0U;

	ISCSI_DBG_SESSION(session, "[op 0x%x cid %d itt 0x%x len %d]\n",
			  opcode, conn->id, itt, datalen);

	if (itt == ~0U) {
		/* unsolicited PDUs (reserved ITT): nop-in, reject, async */
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);

		switch(opcode) {
		case ISCSI_OP_NOOP_IN:
			if (datalen) {
				rc = ISCSI_ERR_PROTO;
				break;
			}

			/* reserved TTT means no reply is requested */
			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
				break;

			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
			break;
		case ISCSI_OP_REJECT:
			rc = iscsi_handle_reject(conn, hdr, data, datalen);
			break;
		case ISCSI_OP_ASYNC_EVENT:
			conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
			if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
				rc = ISCSI_ERR_CONN_FAILED;
			break;
		default:
			rc = ISCSI_ERR_BAD_OPCODE;
			break;
		}
		goto out;
	}

	/* first pass: resolve the ITT to a task for this opcode class */
	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
	case ISCSI_OP_SCSI_DATA_IN:
		task = iscsi_itt_to_ctask(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		task->last_xfer = jiffies;
		break;
	case ISCSI_OP_R2T:
		/*
		 * LLD handles R2Ts if they need to.
		 */
		return 0;
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
	case ISCSI_OP_NOOP_IN:
		task = iscsi_itt_to_task(conn, hdr->itt);
		if (!task)
			return ISCSI_ERR_BAD_ITT;
		break;
	default:
		return ISCSI_ERR_BAD_OPCODE;
	}

	/* second pass: dispatch to the per-opcode handler */
	switch(opcode) {
	case ISCSI_OP_SCSI_CMD_RSP:
		iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
		break;
	case ISCSI_OP_SCSI_DATA_IN:
		iscsi_data_in_rsp(conn, hdr, task);
		break;
	case ISCSI_OP_LOGOUT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
		goto recv_pdu;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		/*
		 * login related PDU's exp_statsn is handled in
		 * userspace
		 */
		goto recv_pdu;
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}

		iscsi_tmf_rsp(conn, hdr);
		iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
		break;
	case ISCSI_OP_NOOP_IN:
		iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
		if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
			rc = ISCSI_ERR_PROTO;
			break;
		}
		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;

		rc = iscsi_nop_out_rsp(task,
				       (struct iscsi_nopin*)hdr, data, datalen);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

out:
	return rc;
recv_pdu:
	if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
		rc = ISCSI_ERR_CONN_FAILED;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
	return rc;
}
EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);

/*
 * iscsi_complete_pdu - locked wrapper around __iscsi_complete_pdu for
 * callers that do not already hold the session lock.
 */
int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		       char *data, int datalen)
{
	int rc;

	spin_lock(&conn->session->lock);
	rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
	spin_unlock(&conn->session->lock);
	return rc;
}
EXPORT_SYMBOL_GPL(iscsi_complete_pdu);

/*
 * iscsi_verify_itt - sanity-check an incoming ITT against the session
 * age and the command-array bounds.  Returns 0 if the ITT is reserved
 * or valid, ISCSI_ERR_BAD_ITT otherwise.
 */
int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_session *session = conn->session;
	int age = 0, i = 0;

	if (itt == RESERVED_ITT)
		return 0;

	if (session->tt->parse_pdu_itt)
		session->tt->parse_pdu_itt(conn, itt, &i, &age);
	else {
		i = get_itt(itt);
		age = ((__force u32)itt >> ISCSI_AGE_SHIFT) & ISCSI_AGE_MASK;
	}

	if (age != session->age) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "received itt %x expected session age (%x)\n",
				  (__force u32)itt, session->age);
		return ISCSI_ERR_BAD_ITT;
	}

	if (i >= session->cmds_max) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "received invalid itt index %u (max cmds "
				   "%u.\n", i, session->cmds_max);
		return ISCSI_ERR_BAD_ITT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_verify_itt);

/**
 * iscsi_itt_to_ctask - look up ctask by itt
 * @conn: iscsi connection
 * @itt: itt
 *
 * This should be used for cmd tasks.
 *
 * The session lock must be held.
 */
struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_task *task;

	if (iscsi_verify_itt(conn, itt))
		return NULL;

	task = iscsi_itt_to_task(conn, itt);
	if (!task || !task->sc)
		return NULL;

	/* a stale scsi_cmnd from a prior session instance is rejected */
	if (task->sc->SCp.phase != conn->session->age) {
		iscsi_session_printk(KERN_ERR, conn->session,
				  "task's session age %d, expected %d\n",
				  task->sc->SCp.phase, conn->session->age);
		return NULL;
	}

	return task;
}
EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);

/*
 * iscsi_session_failure - fail the session's lead connection; a device
 * reference pins the cls_conn across the unlocked error-delivery call.
 * ISCSI_ERR_INVALID_HOST skips recovery since the session is going away.
 */
void iscsi_session_failure(struct iscsi_session *session,
			   enum iscsi_err err)
{
	struct iscsi_conn *conn;
	struct device *dev;
	unsigned long flags;

	spin_lock_irqsave(&session->lock, flags);
	conn = session->leadconn;
	if (session->state == ISCSI_STATE_TERMINATE || !conn) {
		spin_unlock_irqrestore(&session->lock, flags);
		return;
	}

	dev = get_device(&conn->cls_conn->dev);
	spin_unlock_irqrestore(&session->lock, flags);
	if (!dev)
		return;
	/*
	 * if the host is being removed bypass the connection
	 * recovery initialization because we are going to kill
	 * the session.
	 */
	if (err == ISCSI_ERR_INVALID_HOST)
		iscsi_conn_error_event(conn->cls_conn, err);
	else
		iscsi_conn_failure(conn, err);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(iscsi_session_failure);

/*
 * iscsi_conn_failure - mark the session FAILED (unless a conn stop is
 * already in progress), suspend both directions and notify userspace.
 */
void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
	struct iscsi_session *session = conn->session;
	unsigned long flags;

	spin_lock_irqsave(&session->lock, flags);
	if (session->state == ISCSI_STATE_FAILED) {
		spin_unlock_irqrestore(&session->lock, flags);
		return;
	}

	if (conn->stop_stage == 0)
		session->state = ISCSI_STATE_FAILED;
	spin_unlock_irqrestore(&session->lock, flags);

	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
	iscsi_conn_error_event(conn->cls_conn, err);
}
EXPORT_SYMBOL_GPL(iscsi_conn_failure);

/*
 * iscsi_check_cmdsn_window_closed - returns -ENOSPC when queued_cmdsn
 * has caught up with MaxCmdSN (serial-number arithmetic), 0 otherwise.
 * Called with the session lock held.
 */
static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;

	/*
	 * Check for iSCSI window and take care of CmdSN wrap-around
	 */
	if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
		ISCSI_DBG_SESSION(session, "iSCSI CmdSN closed. ExpCmdSn "
				  "%u MaxCmdSN %u CmdSN %u/%u\n",
				  session->exp_cmdsn, session->max_cmdsn,
				  session->cmdsn, session->queued_cmdsn);
		return -ENOSPC;
	}
	return 0;
}

/*
 * iscsi_xmit_task - transmit conn->task via the transport.  Drops the
 * session lock around xmit_task (it may sleep/do network I/O) and holds
 * a task reference across the window.  Returns the transport rc, or
 * -ENODATA when tx is suspended.
 */
static int iscsi_xmit_task(struct iscsi_conn *conn)
{
	struct iscsi_task *task = conn->task;
	int rc;

	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
		return -ENODATA;

	__iscsi_get_task(task);
	spin_unlock_bh(&conn->session->lock);
	rc = conn->session->tt->xmit_task(task);
	spin_lock_bh(&conn->session->lock);
	if (!rc) {
		/* done with this task */
		task->last_xfer = jiffies;
		conn->task = NULL;
	}
	__iscsi_put_task(task);
	return rc;
}

/**
 * iscsi_requeue_task - requeue task to run from session workqueue
 * @task: task to requeue
 *
 * LLDs that need to run a task from the session workqueue should call
 * this. The session lock must be held. This should only be called
 * by software drivers.
 */
void iscsi_requeue_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;

	/*
	 * this may be on the requeue list already if the xmit_task callout
	 * is handling the r2ts while we are adding new ones
	 */
	if (list_empty(&task->running))
		list_add_tail(&task->running, &conn->requeue);
	iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);

/**
 * iscsi_data_xmit - xmit any command into the scheduled connection
 * @conn: iscsi connection
 *
 * Notes:
 *	The function can return -EAGAIN in which case the caller must
 *	re-schedule it again later or recover. '0' return code means
 *	successful xmit.
 **/
static int iscsi_data_xmit(struct iscsi_conn *conn)
{
	struct iscsi_task *task;
	int rc = 0;

	spin_lock_bh(&conn->session->lock);
	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
		spin_unlock_bh(&conn->session->lock);
		return -ENODATA;
	}

	/* finish a partially-sent task left over from the last run */
	if (conn->task) {
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
	}

	/*
	 * process mgmt pdus like nops before commands since we should
	 * only have one nop-out as a ping from us and targets should not
	 * overflow us with nop-ins
	 */
check_mgmt:
	while (!list_empty(&conn->mgmtqueue)) {
		conn->task = list_entry(conn->mgmtqueue.next,
					 struct iscsi_task, running);
		list_del_init(&conn->task->running);
		if (iscsi_prep_mgmt_task(conn, conn->task)) {
			__iscsi_put_task(conn->task);
			conn->task = NULL;
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
	}

	/* process pending command queue */
	while (!list_empty(&conn->cmdqueue)) {
		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
					running);
		list_del_init(&conn->task->running);
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
			fail_scsi_task(conn->task, DID_IMM_RETRY);
			continue;
		}
		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
		if (rc) {
			if (rc == -ENOMEM || rc == -EACCES) {
				/* transient: put it back and retry later */
				list_add_tail(&conn->task->running,
					      &conn->cmdqueue);
				conn->task = NULL;
				goto done;
			} else
				fail_scsi_task(conn->task, DID_ABORT);
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
		/*
		 * we could continuously get new task requests so
		 * we need to check the mgmt queue for nops that need to
		 * be sent to avoid starvation
		 */
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}

	while (!list_empty(&conn->requeue)) {
		/*
		 * we always do fastlogout - conn stop code will clean up.
		 */
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
			break;

		task = list_entry(conn->requeue.next, struct iscsi_task,
				  running);
		if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
			break;

		conn->task = task;
		list_del_init(&conn->task->running);
		conn->task->state = ISCSI_TASK_RUNNING;
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}
	spin_unlock_bh(&conn->session->lock);
	return -ENODATA;

done:
	spin_unlock_bh(&conn->session->lock);
	return rc;
}

/*
 * iscsi_xmitworker - workqueue entry point; loops iscsi_data_xmit until
 * it returns a terminal error, serializing transmission per connection.
 */
static void iscsi_xmitworker(struct work_struct *work)
{
	struct iscsi_conn *conn =
		container_of(work, struct iscsi_conn, xmitwork);
	int rc;
	/*
	 * serialize Xmit worker on a per-connection basis.
	 */
	do {
		rc = iscsi_data_xmit(conn);
	} while (rc >= 0 || rc == -EAGAIN);
}

/*
 * iscsi_alloc_task - pop a task from the session command pool and bind
 * it to @sc.  Called with the session lock held.  Returns NULL when the
 * pool is exhausted.
 */
static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
						  struct scsi_cmnd *sc)
{
	struct iscsi_task *task;

	if (!kfifo_out(&conn->session->cmdpool.queue,
			 (void *) &task, sizeof(void *)))
		return NULL;

	/* stash session age + task pointer in the midlayer scratch area */
	sc->SCp.phase = conn->session->age;
	sc->SCp.ptr = (char *) task;

	atomic_set(&task->refcount, 1);
	task->state = ISCSI_TASK_PENDING;
	task->conn = conn;
	task->sc = sc;
	task->have_checked_conn = false;
	task->last_timeout = jiffies;
	task->last_xfer = jiffies;
	INIT_LIST_HEAD(&task->running);
	return task;
}

/* internal reason codes reported by iscsi_queuecommand debug output */
enum {
	FAILURE_BAD_HOST = 1,
	FAILURE_SESSION_FAILED,
	FAILURE_SESSION_FREED,
	FAILURE_WINDOW_CLOSED,
	FAILURE_OOM,
	FAILURE_SESSION_TERMINATE,
	FAILURE_SESSION_IN_RECOVERY,
	FAILURE_SESSION_RECOVERY_TIMEOUT,
	FAILURE_SESSION_LOGGING_OUT,
	FAILURE_SESSION_NOT_READY,
};

/*
 * iscsi_queuecommand - SCSI midlayer queuecommand entry (old locked
 * convention: called with the host lock held, which is dropped here and
 * retaken before return).  Queues @sc on the connection's cmdqueue (or
 * sends it inline for offload hosts without a workqueue).
 */
int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct iscsi_cls_session *cls_session;
	struct Scsi_Host *host;
	struct iscsi_host *ihost;
	int reason = 0;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	struct iscsi_task *task = NULL;

	sc->scsi_done = done;
	sc->result = 0;
	sc->SCp.ptr = NULL;

	host = sc->device->host;
	ihost = shost_priv(host);
	spin_unlock(host->host_lock);

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock(&session->lock);

	reason = iscsi_session_chkready(cls_session);
	if (reason) {
		sc->result = reason;
		goto fault;
	}

	if (session->state != ISCSI_STATE_LOGGED_IN) {
		/*
		 * to handle the race between when we set the recovery state
		 * and block the session we requeue here (commands could
		 * be entering our queuecommand while a block is starting
		 * up because the block code is not locked)
		 */
		switch (session->state) {
		case ISCSI_STATE_FAILED:
		case ISCSI_STATE_IN_RECOVERY:
			reason = FAILURE_SESSION_IN_RECOVERY;
			sc->result = DID_IMM_RETRY << 16;
			break;
		case ISCSI_STATE_LOGGING_OUT:
			reason = FAILURE_SESSION_LOGGING_OUT;
			sc->result = DID_IMM_RETRY << 16;
			break;
		case ISCSI_STATE_RECOVERY_FAILED:
			reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
			sc->result = DID_TRANSPORT_FAILFAST << 16;
			break;
		case ISCSI_STATE_TERMINATE:
			reason = FAILURE_SESSION_TERMINATE;
			sc->result = DID_NO_CONNECT << 16;
			break;
		default:
			reason = FAILURE_SESSION_FREED;
			sc->result = DID_NO_CONNECT << 16;
		}
		goto fault;
	}

	conn = session->leadconn;
	if (!conn) {
		reason = FAILURE_SESSION_FREED;
		sc->result = DID_NO_CONNECT << 16;
		goto fault;
	}

	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
		reason = FAILURE_SESSION_IN_RECOVERY;
		/*
		 * NOTE(review): every other path shifts the DID_* code left
		 * 16 into the host byte; DID_REQUEUE here is unshifted —
		 * confirm this is intentional.
		 */
		sc->result = DID_REQUEUE;
		goto fault;
	}

	if (iscsi_check_cmdsn_window_closed(conn)) {
		reason = FAILURE_WINDOW_CLOSED;
		goto reject;
	}

	task = iscsi_alloc_task(conn, sc);
	if (!task) {
		reason = FAILURE_OOM;
		goto reject;
	}

	if (!ihost->workq) {
		/* offload path: prep and hand to the transport inline */
		reason = iscsi_prep_scsi_cmd_pdu(task);
		if (reason) {
			if (reason == -ENOMEM ||  reason == -EACCES) {
				reason = FAILURE_OOM;
				goto prepd_reject;
			} else {
				sc->result = DID_ABORT << 16;
				goto prepd_fault;
			}
		}
		if (session->tt->xmit_task(task)) {
			/* undo the CmdSN consumed by the prep step */
			session->cmdsn--;
			reason = FAILURE_SESSION_NOT_READY;
			goto prepd_reject;
		}
	} else {
		list_add_tail(&task->running, &conn->cmdqueue);
		iscsi_conn_queue_work(conn);
	}

	session->queued_cmdsn++;
	spin_unlock(&session->lock);
	spin_lock(host->host_lock);
	return 0;

prepd_reject:
	/* clear scsi_done so completion does not call back into the ml */
	sc->scsi_done = NULL;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
reject:
	spin_unlock(&session->lock);
	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
			  sc->cmnd[0], reason);
	spin_lock(host->host_lock);
	return SCSI_MLQUEUE_TARGET_BUSY;

prepd_fault:
	sc->scsi_done = NULL;
	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
fault:
	spin_unlock(&session->lock);
	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
			  sc->cmnd[0], reason);
	if (!scsi_bidi_cmnd(sc))
		scsi_set_resid(sc, scsi_bufflen(sc));
	else {
		scsi_out(sc)->resid = scsi_out(sc)->length;
		scsi_in(sc)->resid = scsi_in(sc)->length;
	}
	done(sc);
	spin_lock(host->host_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_queuecommand);

/*
 * iscsi_change_queue_depth - scsi_host_template change_queue_depth
 * callout; adjusts or tracks the device queue depth per @reason.
 */
int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}
EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);

/*
 * iscsi_target_alloc - cap the target's queue depth at the session's
 * SCSI command pool size.
 */
int iscsi_target_alloc(struct scsi_target *starget)
{
	struct iscsi_cls_session *cls_session = starget_to_session(starget);
	struct iscsi_session *session = cls_session->dd_data;

	starget->can_queue = session->scsi_cmds_max;
	return 0;
}
EXPORT_SYMBOL_GPL(iscsi_target_alloc);

/*
 * iscsi_tmf_timedout - conn->tmf_timer expiry: mark the TMF timed out
 * and wake the error handler waiting in iscsi_exec_task_mgmt_fn.
 */
static void iscsi_tmf_timedout(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *)data;
	struct iscsi_session *session = conn->session;

	spin_lock(&session->lock);
	if (conn->tmf_state == TMF_QUEUED) {
		conn->tmf_state = TMF_TIMEDOUT;
		ISCSI_DBG_EH(session, "tmf timedout\n");
		/* unblock eh_abort() */
		wake_up(&conn->ehwait);
	}
	spin_unlock(&session->lock);
}

/*
 * iscsi_exec_task_mgmt_fn - send a TMF PDU and sleep until a response,
 * timeout, or session change.  Entered with eh_mutex + session lock
 * held; both are dropped across the wait and retaken before return.
 * Returns 0, -EPERM (send failed), or -ENOTCONN (session dropped).
 */
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
				   struct iscsi_tm *hdr, int age,
				   int timeout)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;

	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
				      NULL, 0);
	if (!task) {
		spin_unlock_bh(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		spin_lock_bh(&session->lock);
		ISCSI_DBG_EH(session, "tmf exec failure\n");
		return -EPERM;
	}
	conn->tmfcmd_pdus_cnt++;
	conn->tmf_timer.expires = timeout * HZ + jiffies;
	conn->tmf_timer.function = iscsi_tmf_timedout;
	conn->tmf_timer.data = (unsigned long)conn;
	add_timer(&conn->tmf_timer);
	ISCSI_DBG_EH(session, "tmf set timeout\n");

	spin_unlock_bh(&session->lock);
	mutex_unlock(&session->eh_mutex);

	/*
	 * block eh thread until:
	 *
	 * 1) tmf response
	 * 2) tmf timeout
	 * 3) session is terminated or restarted or userspace has
	 * given up on recovery
	 */
	wait_event_interruptible(conn->ehwait, age != session->age ||
				 session->state != ISCSI_STATE_LOGGED_IN ||
				 conn->tmf_state != TMF_QUEUED);
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&conn->tmf_timer);

	mutex_lock(&session->eh_mutex);
	spin_lock_bh(&session->lock);
	/* if the session drops it will clean up the task */
	if (age != session->age ||
	    session->state != ISCSI_STATE_LOGGED_IN)
		return -ENOTCONN;
	return 0;
}

/*
 * Fail commands. session lock held and recv side suspended and xmit
 * thread flushed
 */
static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
			    int error)
{
	struct iscsi_task *task;
	int i;

	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc || task->state == ISCSI_TASK_FREE)
			continue;

		/* lun == -1 means "all LUNs" */
		if (lun != -1 && lun != task->sc->device->lun)
			continue;

		ISCSI_DBG_SESSION(conn->session,
				  "failing sc %p itt 0x%x state %d\n",
				  task->sc, task->itt, task->state);
		fail_scsi_task(task, error);
	}
}

/**
 * iscsi_suspend_queue - suspend iscsi_queuecommand
 * @conn: iscsi conn to stop queueing IO on
 *
 * This grabs the session lock to make sure no one is in
 * xmit_task/queuecommand, and then sets suspend to prevent
 * new commands from being queued. This only needs to be called
 * by offload drivers that need to sync a path like ep disconnect
 * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
 * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
 */
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->session->lock);
	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
	spin_unlock_bh(&conn->session->lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);

/**
 * iscsi_suspend_tx - suspend iscsi_data_xmit
 * @conn: iscsi conn to stop processing IO on.
 *
 * This function sets the suspend bit to prevent iscsi_data_xmit
 * from sending new IO, and if work is queued on the xmit thread
 * it will wait for it to be completed.
 */
void iscsi_suspend_tx(struct iscsi_conn *conn)
{
	struct Scsi_Host *shost = conn->session->host;
	struct iscsi_host *ihost = shost_priv(shost);

	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
	if (ihost->workq)
		flush_workqueue(ihost->workq);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);

/* iscsi_start_tx - re-enable transmission and kick the xmit worker */
static void iscsi_start_tx(struct iscsi_conn *conn)
{
	clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
	iscsi_conn_queue_work(conn);
}

/*
 * We want to make sure a ping is in flight. It has timed out.
 * And we are not busy processing a pdu that is making
 * progress but got started before the ping and is taking a while
 * to complete so the ping is just stuck behind it in a queue.
 */
static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
{
	if (conn->ping_task &&
	    time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
			   (conn->ping_timeout * HZ), jiffies))
		return 1;
	else
		return 0;
}

/*
 * iscsi_eh_cmd_timed_out - block-layer command timeout hook.  Grants
 * more time when the task (or an older running task) is making
 * progress; otherwise uses a nop-out ping to probe the transport
 * before letting the SCSI eh escalate.
 */
static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
{
	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
	struct iscsi_task *task = NULL, *running_task;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_conn *conn;
	int i;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);

	spin_lock(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN) {
		/*
		 * We are probably in the middle of iscsi recovery so let
		 * that complete and handle the error.
		 */
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	conn = session->leadconn;
	if (!conn) {
		/* In the middle of shuting down */
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	task = (struct iscsi_task *)sc->SCp.ptr;
	if (!task) {
		/*
		 * Raced with completion. Just reset timer, and let it
		 * complete normally
		 */
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	/*
	 * If we have sent (at least queued to the network layer) a pdu or
	 * recvd one for the task since the last timeout ask for
	 * more time. If on the next timeout we have not made progress
	 * we can check if it is the task or connection when we send the
	 * nop as a ping.
	 */
	if (time_after(task->last_xfer, task->last_timeout)) {
		ISCSI_DBG_EH(session, "Command making progress. Asking "
			     "scsi-ml for more time to complete. "
			     "Last data xfer at %lu. Last timeout was at "
			     "%lu\n.", task->last_xfer, task->last_timeout);
		task->have_checked_conn = false;
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	if (!conn->recv_timeout && !conn->ping_timeout)
		goto done;
	/*
	 * if the ping timedout then we are in the middle of cleaning up
	 * and can let the iscsi eh handle it
	 */
	if (iscsi_has_ping_timed_out(conn)) {
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	for (i = 0; i < conn->session->cmds_max; i++) {
		running_task = conn->session->cmds[i];
		if (!running_task->sc || running_task == task ||
		     running_task->state != ISCSI_TASK_RUNNING)
			continue;

		/*
		 * Only check if cmds started before this one have made
		 * progress, or this could never fail
		 */
		if (time_after(running_task->sc->jiffies_at_alloc,
			       task->sc->jiffies_at_alloc))
			continue;

		if (time_after(running_task->last_xfer, task->last_timeout)) {
			/*
			 * This task has not made progress, but a task
			 * started before us has transferred data since
			 * we started/last-checked. We could be queueing
			 * too many tasks or the LU is bad.
			 *
			 * If the device is bad the cmds ahead of us on
			 * other devs will complete, and this loop will
			 * eventually fail starting the scsi eh.
			 */
			ISCSI_DBG_EH(session, "Command has not made progress "
				     "but commands ahead of it have. "
				     "Asking scsi-ml for more time to "
				     "complete. Our last xfer vs running task "
				     "last xfer %lu/%lu. Last check %lu.\n",
				     task->last_xfer, running_task->last_xfer,
				     task->last_timeout);
			rc = BLK_EH_RESET_TIMER;
			goto done;
		}
	}

	/* Assumes nop timeout is shorter than scsi cmd timeout */
	if (task->have_checked_conn)
		goto done;

	/*
	 * Checking the transport already or nop from a cmd timeout still
	 * running
	 */
	if (conn->ping_task) {
		task->have_checked_conn = true;
		rc = BLK_EH_RESET_TIMER;
		goto done;
	}

	/* Make sure there is a transport check done */
	iscsi_send_nopout(conn, NULL);
	task->have_checked_conn = true;
	rc = BLK_EH_RESET_TIMER;

done:
	if (task)
		task->last_timeout = jiffies;
	spin_unlock(&session->lock);
	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
		     "timer reset" : "nh");
	return rc;
}

/*
 * iscsi_check_transport_timeouts - conn->transport_timer handler.  If
 * the ping window has expired the connection is failed; if only the
 * receive window expired a nop-out ping is sent and the timer re-armed.
 */
static void iscsi_check_transport_timeouts(unsigned long data)
{
	struct iscsi_conn *conn = (struct iscsi_conn *)data;
	struct iscsi_session *session = conn->session;
	unsigned long recv_timeout, next_timeout = 0, last_recv;

	spin_lock(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN)
		goto done;

	recv_timeout = conn->recv_timeout;
	if (!recv_timeout)
		goto done;

	recv_timeout *= HZ;
	last_recv = conn->last_recv;

	if (iscsi_has_ping_timed_out(conn)) {
		iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
				  "expired, recv timeout %d, last rx %lu, "
				  "last ping %lu, now %lu\n",
				  conn->ping_timeout, conn->recv_timeout,
				  last_recv, conn->last_ping, jiffies);
		spin_unlock(&session->lock);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
		return;
	}

	if (time_before_eq(last_recv + recv_timeout, jiffies)) {
		/* send a ping to try to provoke some traffic */
		ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
		iscsi_send_nopout(conn, NULL);
		next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
	} else
		next_timeout = last_recv + recv_timeout;

	ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);
	mod_timer(&conn->transport_timer, next_timeout);
done:
	spin_unlock(&session->lock);
}

/*
 * iscsi_prep_abort_task_pdu - build an immediate ABORT TASK TMF for
 * @task (referenced task tag and CmdSN taken from the task's PDU).
 */
static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
				      struct iscsi_tm *hdr)
{
memset(hdr, 0, sizeof(*hdr)); hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; memcpy(hdr->lun, task->lun, sizeof(hdr->lun)); hdr->rtt = task->hdr_itt; hdr->refcmdsn = task->cmdsn; } int iscsi_eh_abort(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_task *task; struct iscsi_tm *hdr; int rc, age; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; ISCSI_DBG_EH(session, "aborting sc %p\n", sc); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); /* * if session was ISCSI_STATE_IN_RECOVERY then we may not have * got the command. */ if (!sc->SCp.ptr) { ISCSI_DBG_EH(session, "sc never reached iscsi layer or " "it completed.\n"); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); return SUCCESS; } /* * If we are not logged in or we have started a new session * then let the host reset code handle this */ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN || sc->SCp.phase != session->age) { spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); ISCSI_DBG_EH(session, "failing abort due to dropped " "session.\n"); return FAILED; } conn = session->leadconn; conn->eh_abort_cnt++; age = session->age; task = (struct iscsi_task *)sc->SCp.ptr; ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt); /* task completed before time out */ if (!task->sc) { ISCSI_DBG_EH(session, "sc completed while abort in progress\n"); goto success; } if (task->state == ISCSI_TASK_PENDING) { fail_scsi_task(task, DID_ABORT); goto success; } /* only have one tmf outstanding at a time */ if (conn->tmf_state != TMF_INITIAL) goto failed; conn->tmf_state = TMF_QUEUED; hdr = &conn->tmhdr; iscsi_prep_abort_task_pdu(task, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) { rc = FAILED; goto 
failed; } switch (conn->tmf_state) { case TMF_SUCCESS: spin_unlock_bh(&session->lock); /* * stop tx side incase the target had sent a abort rsp but * the initiator was still writing out data. */ iscsi_suspend_tx(conn); /* * we do not stop the recv side because targets have been * good and have never sent us a successful tmf response * then sent more data for the cmd. */ spin_lock_bh(&session->lock); fail_scsi_task(task, DID_ABORT); conn->tmf_state = TMF_INITIAL; memset(hdr, 0, sizeof(*hdr)); spin_unlock_bh(&session->lock); iscsi_start_tx(conn); goto success_unlocked; case TMF_TIMEDOUT: spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); goto failed_unlocked; case TMF_NOT_FOUND: if (!sc->SCp.ptr) { conn->tmf_state = TMF_INITIAL; memset(hdr, 0, sizeof(*hdr)); /* task completed before tmf abort response */ ISCSI_DBG_EH(session, "sc completed while abort in " "progress\n"); goto success; } /* fall through */ default: conn->tmf_state = TMF_INITIAL; goto failed; } success: spin_unlock_bh(&session->lock); success_unlocked: ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n", sc, task->itt); mutex_unlock(&session->eh_mutex); return SUCCESS; failed: spin_unlock_bh(&session->lock); failed_unlocked: ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc, task ? 
task->itt : 0); mutex_unlock(&session->eh_mutex); return FAILED; } EXPORT_SYMBOL_GPL(iscsi_eh_abort); static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) { memset(hdr, 0, sizeof(*hdr)); hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); hdr->rtt = RESERVED_ITT; } int iscsi_eh_device_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tm *hdr; int rc = FAILED; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); /* * Just check if we are not logged in. We cannot check for * the phase because the reset could come from a ioctl. */ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) goto unlock; conn = session->leadconn; /* only have one tmf outstanding at a time */ if (conn->tmf_state != TMF_INITIAL) goto unlock; conn->tmf_state = TMF_QUEUED; hdr = &conn->tmhdr; iscsi_prep_lun_reset_pdu(sc, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, session->lu_reset_timeout)) { rc = FAILED; goto unlock; } switch (conn->tmf_state) { case TMF_SUCCESS: break; case TMF_TIMEDOUT: spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); goto done; default: conn->tmf_state = TMF_INITIAL; goto unlock; } rc = SUCCESS; spin_unlock_bh(&session->lock); iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); conn->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->lock); iscsi_start_tx(conn); goto done; unlock: spin_unlock_bh(&session->lock); done: ISCSI_DBG_EH(session, "dev reset result = %s\n", rc == SUCCESS 
? "SUCCESS" : "FAILED"); mutex_unlock(&session->eh_mutex); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_device_reset); void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; spin_lock_bh(&session->lock); if (session->state != ISCSI_STATE_LOGGED_IN) { session->state = ISCSI_STATE_RECOVERY_FAILED; if (session->leadconn) wake_up(&session->leadconn->ehwait); } spin_unlock_bh(&session->lock); } EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); /** * iscsi_eh_session_reset - drop session and attempt relogin * @sc: scsi command * * This function will wait for a relogin, session termination from * userspace, or a recovery/replacement timeout. */ int iscsi_eh_session_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; conn = session->leadconn; mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); if (session->state == ISCSI_STATE_TERMINATE) { failed: ISCSI_DBG_EH(session, "failing session reset: Could not log back into " "%s, %s [age %d]\n", session->targetname, conn->persistent_address, session->age); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); return FAILED; } spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); /* * we drop the lock here but the leadconn cannot be destoyed while * we are in the scsi eh */ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); ISCSI_DBG_EH(session, "wait for relogin\n"); wait_event_interruptible(conn->ehwait, session->state == ISCSI_STATE_TERMINATE || session->state == ISCSI_STATE_LOGGED_IN || session->state == ISCSI_STATE_RECOVERY_FAILED); if (signal_pending(current)) flush_signals(current); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); if (session->state == ISCSI_STATE_LOGGED_IN) { ISCSI_DBG_EH(session, "session reset succeeded for %s,%s\n", 
session->targetname, conn->persistent_address); } else goto failed; spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); return SUCCESS; } EXPORT_SYMBOL_GPL(iscsi_eh_session_reset); static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) { memset(hdr, 0, sizeof(*hdr)); hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE; hdr->flags = ISCSI_TM_FUNC_TARGET_WARM_RESET & ISCSI_FLAG_TM_FUNC_MASK; hdr->flags |= ISCSI_FLAG_CMD_FINAL; hdr->rtt = RESERVED_ITT; } /** * iscsi_eh_target_reset - reset target * @sc: scsi command * * This will attempt to send a warm target reset. */ int iscsi_eh_target_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tm *hdr; int rc = FAILED; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; ISCSI_DBG_EH(session, "tgt Reset [sc %p tgt %s]\n", sc, session->targetname); mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); /* * Just check if we are not logged in. We cannot check for * the phase because the reset could come from a ioctl. 
*/ if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) goto unlock; conn = session->leadconn; /* only have one tmf outstanding at a time */ if (conn->tmf_state != TMF_INITIAL) goto unlock; conn->tmf_state = TMF_QUEUED; hdr = &conn->tmhdr; iscsi_prep_tgt_reset_pdu(sc, hdr); if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age, session->tgt_reset_timeout)) { rc = FAILED; goto unlock; } switch (conn->tmf_state) { case TMF_SUCCESS: break; case TMF_TIMEDOUT: spin_unlock_bh(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); goto done; default: conn->tmf_state = TMF_INITIAL; goto unlock; } rc = SUCCESS; spin_unlock_bh(&session->lock); iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); memset(hdr, 0, sizeof(*hdr)); fail_scsi_tasks(conn, -1, DID_ERROR); conn->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->lock); iscsi_start_tx(conn); goto done; unlock: spin_unlock_bh(&session->lock); done: ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, rc == SUCCESS ? "SUCCESS" : "FAILED"); mutex_unlock(&session->eh_mutex); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); /** * iscsi_eh_recover_target - reset target and possibly the session * @sc: scsi command * * This will attempt to send a warm target reset. If that fails, * we will escalate to ERL0 session recovery. */ int iscsi_eh_recover_target(struct scsi_cmnd *sc) { int rc; rc = iscsi_eh_target_reset(sc); if (rc == FAILED) rc = iscsi_eh_session_reset(sc); return rc; } EXPORT_SYMBOL_GPL(iscsi_eh_recover_target); /* * Pre-allocate a pool of @max items of @item_size. By default, the pool * should be accessed via kfifo_{get,put} on q->queue. * Optionally, the caller can obtain the array of object pointers * by passing in a non-NULL @items pointer */ int iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) { int i, num_arrays = 1; memset(q, 0, sizeof(*q)); q->max = max; /* If the user passed an items pointer, he wants a copy of * the array. 
*/ if (items) num_arrays++; q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL); if (q->pool == NULL) return -ENOMEM; kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*)); for (i = 0; i < max; i++) { q->pool[i] = kzalloc(item_size, GFP_KERNEL); if (q->pool[i] == NULL) { q->max = i; goto enomem; } kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*)); } if (items) { *items = q->pool + max; memcpy(*items, q->pool, max * sizeof(void *)); } return 0; enomem: iscsi_pool_free(q); return -ENOMEM; } EXPORT_SYMBOL_GPL(iscsi_pool_init); void iscsi_pool_free(struct iscsi_pool *q) { int i; for (i = 0; i < q->max; i++) kfree(q->pool[i]); kfree(q->pool); } EXPORT_SYMBOL_GPL(iscsi_pool_free); /** * iscsi_host_add - add host to system * @shost: scsi host * @pdev: parent device * * This should be called by partial offload and software iscsi drivers * to add a host to the system. */ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev) { if (!shost->can_queue) shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX; if (!shost->cmd_per_lun) shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN; if (!shost->transportt->eh_timed_out) shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out; return scsi_add_host(shost, pdev); } EXPORT_SYMBOL_GPL(iscsi_host_add); /** * iscsi_host_alloc - allocate a host and driver data * @sht: scsi host template * @dd_data_size: driver host data size * @xmit_can_sleep: bool indicating if LLD will queue IO from a work queue * * This should be called by partial offload and software iscsi drivers. * To access the driver specific memory use the iscsi_host_priv() macro. 
*/ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, int dd_data_size, bool xmit_can_sleep) { struct Scsi_Host *shost; struct iscsi_host *ihost; shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size); if (!shost) return NULL; ihost = shost_priv(shost); if (xmit_can_sleep) { snprintf(ihost->workq_name, sizeof(ihost->workq_name), "iscsi_q_%d", shost->host_no); ihost->workq = create_singlethread_workqueue(ihost->workq_name); if (!ihost->workq) goto free_host; } spin_lock_init(&ihost->lock); ihost->state = ISCSI_HOST_SETUP; ihost->num_sessions = 0; init_waitqueue_head(&ihost->session_removal_wq); return shost; free_host: scsi_host_put(shost); return NULL; } EXPORT_SYMBOL_GPL(iscsi_host_alloc); static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session) { iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_INVALID_HOST); } /** * iscsi_host_remove - remove host and sessions * @shost: scsi host * * If there are any sessions left, this will initiate the removal and wait * for the completion. 
*/ void iscsi_host_remove(struct Scsi_Host *shost) { struct iscsi_host *ihost = shost_priv(shost); unsigned long flags; spin_lock_irqsave(&ihost->lock, flags); ihost->state = ISCSI_HOST_REMOVED; spin_unlock_irqrestore(&ihost->lock, flags); iscsi_host_for_each_session(shost, iscsi_notify_host_removed); wait_event_interruptible(ihost->session_removal_wq, ihost->num_sessions == 0); if (signal_pending(current)) flush_signals(current); scsi_remove_host(shost); if (ihost->workq) destroy_workqueue(ihost->workq); } EXPORT_SYMBOL_GPL(iscsi_host_remove); void iscsi_host_free(struct Scsi_Host *shost) { struct iscsi_host *ihost = shost_priv(shost); kfree(ihost->netdev); kfree(ihost->hwaddress); kfree(ihost->initiatorname); scsi_host_put(shost); } EXPORT_SYMBOL_GPL(iscsi_host_free); static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost) { struct iscsi_host *ihost = shost_priv(shost); unsigned long flags; shost = scsi_host_get(shost); if (!shost) { printk(KERN_ERR "Invalid state. Cannot notify host removal " "of session teardown event because host already " "removed.\n"); return; } spin_lock_irqsave(&ihost->lock, flags); ihost->num_sessions--; if (ihost->num_sessions == 0) wake_up(&ihost->session_removal_wq); spin_unlock_irqrestore(&ihost->lock, flags); scsi_host_put(shost); } /** * iscsi_session_setup - create iscsi cls session and host and session * @iscsit: iscsi transport template * @shost: scsi host * @cmds_max: session can queue * @cmd_task_size: LLD task private data size * @initial_cmdsn: initial CmdSN * * This can be used by software iscsi_transports that allocate * a session per scsi host. * * Callers should set cmds_max to the largest total numer (mgmt + scsi) of * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks * for nop handling and login/logout requests. 
*/ struct iscsi_cls_session * iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, uint16_t cmds_max, int dd_size, int cmd_task_size, uint32_t initial_cmdsn, unsigned int id) { struct iscsi_host *ihost = shost_priv(shost); struct iscsi_session *session; struct iscsi_cls_session *cls_session; int cmd_i, scsi_cmds, total_cmds = cmds_max; unsigned long flags; spin_lock_irqsave(&ihost->lock, flags); if (ihost->state == ISCSI_HOST_REMOVED) { spin_unlock_irqrestore(&ihost->lock, flags); return NULL; } ihost->num_sessions++; spin_unlock_irqrestore(&ihost->lock, flags); if (!total_cmds) total_cmds = ISCSI_DEF_XMIT_CMDS_MAX; /* * The iscsi layer needs some tasks for nop handling and tmfs, * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX * + 1 command for scsi IO. */ if (total_cmds < ISCSI_TOTAL_CMDS_MIN) { printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " "must be a power of two that is at least %d.\n", total_cmds, ISCSI_TOTAL_CMDS_MIN); goto dec_session_count; } if (total_cmds > ISCSI_TOTAL_CMDS_MAX) { printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue " "must be a power of 2 less than or equal to %d.\n", cmds_max, ISCSI_TOTAL_CMDS_MAX); total_cmds = ISCSI_TOTAL_CMDS_MAX; } if (!is_power_of_2(total_cmds)) { printk(KERN_ERR "iscsi: invalid can_queue of %d. 
can_queue " "must be a power of 2.\n", total_cmds); total_cmds = rounddown_pow_of_two(total_cmds); if (total_cmds < ISCSI_TOTAL_CMDS_MIN) return NULL; printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n", total_cmds); } scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX; cls_session = iscsi_alloc_session(shost, iscsit, sizeof(struct iscsi_session) + dd_size); if (!cls_session) goto dec_session_count; session = cls_session->dd_data; session->cls_session = cls_session; session->host = shost; session->state = ISCSI_STATE_FREE; session->fast_abort = 1; session->tgt_reset_timeout = 30; session->lu_reset_timeout = 15; session->abort_timeout = 10; session->scsi_cmds_max = scsi_cmds; session->cmds_max = total_cmds; session->queued_cmdsn = session->cmdsn = initial_cmdsn; session->exp_cmdsn = initial_cmdsn + 1; session->max_cmdsn = initial_cmdsn + 1; session->max_r2t = 1; session->tt = iscsit; session->dd_data = cls_session->dd_data + sizeof(*session); mutex_init(&session->eh_mutex); spin_lock_init(&session->lock); /* initialize SCSI PDU commands pool */ if (iscsi_pool_init(&session->cmdpool, session->cmds_max, (void***)&session->cmds, cmd_task_size + sizeof(struct iscsi_task))) goto cmdpool_alloc_fail; /* pre-format cmds pool with ITT */ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) { struct iscsi_task *task = session->cmds[cmd_i]; if (cmd_task_size) task->dd_data = &task[1]; task->itt = cmd_i; task->state = ISCSI_TASK_FREE; INIT_LIST_HEAD(&task->running); } if (!try_module_get(iscsit->owner)) goto module_get_fail; if (iscsi_add_session(cls_session, id)) goto cls_session_fail; return cls_session; cls_session_fail: module_put(iscsit->owner); module_get_fail: iscsi_pool_free(&session->cmdpool); cmdpool_alloc_fail: iscsi_free_session(cls_session); dec_session_count: iscsi_host_dec_session_cnt(shost); return NULL; } EXPORT_SYMBOL_GPL(iscsi_session_setup); /** * iscsi_session_teardown - destroy session, host, and cls_session * @cls_session: iscsi session * * The driver must 
have called iscsi_remove_session before * calling this. */ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; struct module *owner = cls_session->transport->owner; struct Scsi_Host *shost = session->host; iscsi_pool_free(&session->cmdpool); kfree(session->password); kfree(session->password_in); kfree(session->username); kfree(session->username_in); kfree(session->targetname); kfree(session->initiatorname); kfree(session->ifacename); iscsi_destroy_session(cls_session); iscsi_host_dec_session_cnt(shost); module_put(owner); } EXPORT_SYMBOL_GPL(iscsi_session_teardown); /** * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn * @cls_session: iscsi_cls_session * @dd_size: private driver data size * @conn_idx: cid */ struct iscsi_cls_conn * iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, uint32_t conn_idx) { struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn; struct iscsi_cls_conn *cls_conn; char *data; cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size, conn_idx); if (!cls_conn) return NULL; conn = cls_conn->dd_data; memset(conn, 0, sizeof(*conn) + dd_size); conn->dd_data = cls_conn->dd_data + sizeof(*conn); conn->session = session; conn->cls_conn = cls_conn; conn->c_stage = ISCSI_CONN_INITIAL_STAGE; conn->id = conn_idx; conn->exp_statsn = 0; conn->tmf_state = TMF_INITIAL; init_timer(&conn->transport_timer); conn->transport_timer.data = (unsigned long)conn; conn->transport_timer.function = iscsi_check_transport_timeouts; INIT_LIST_HEAD(&conn->mgmtqueue); INIT_LIST_HEAD(&conn->cmdqueue); INIT_LIST_HEAD(&conn->requeue); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); /* allocate login_task used for the login/text sequences */ spin_lock_bh(&session->lock); if (!kfifo_out(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*))) { spin_unlock_bh(&session->lock); goto login_task_alloc_fail; } spin_unlock_bh(&session->lock); data = 
(char *) __get_free_pages(GFP_KERNEL, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); if (!data) goto login_task_data_alloc_fail; conn->login_task->data = conn->data = data; init_timer(&conn->tmf_timer); init_waitqueue_head(&conn->ehwait); return cls_conn; login_task_data_alloc_fail: kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*)); login_task_alloc_fail: iscsi_destroy_conn(cls_conn); return NULL; } EXPORT_SYMBOL_GPL(iscsi_conn_setup); /** * iscsi_conn_teardown - teardown iscsi connection * cls_conn: iscsi class connection * * TODO: we may need to make this into a two step process * like scsi-mls remove + put host */ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; unsigned long flags; del_timer_sync(&conn->transport_timer); spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; if (session->leadconn == conn) { /* * leading connection? then give up on recovery. */ session->state = ISCSI_STATE_TERMINATE; wake_up(&conn->ehwait); } spin_unlock_bh(&session->lock); /* * Block until all in-progress commands for this connection * time out or fail. 
*/ for (;;) { spin_lock_irqsave(session->host->host_lock, flags); if (!session->host->host_busy) { /* OK for ERL == 0 */ spin_unlock_irqrestore(session->host->host_lock, flags); break; } spin_unlock_irqrestore(session->host->host_lock, flags); msleep_interruptible(500); iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): " "host_busy %d host_failed %d\n", session->host->host_busy, session->host->host_failed); /* * force eh_abort() to unblock */ wake_up(&conn->ehwait); } /* flush queued up work because we free the connection below */ iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); free_pages((unsigned long) conn->data, get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); kfree(conn->persistent_address); kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, sizeof(void*)); if (session->leadconn == conn) session->leadconn = NULL; spin_unlock_bh(&session->lock); iscsi_destroy_conn(cls_conn); } EXPORT_SYMBOL_GPL(iscsi_conn_teardown); int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; if (!session) { iscsi_conn_printk(KERN_ERR, conn, "can't start unbound connection\n"); return -EPERM; } if ((session->imm_data_en || !session->initial_r2t_en) && session->first_burst > session->max_burst) { iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: " "first_burst %d max_burst %d\n", session->first_burst, session->max_burst); return -EINVAL; } if (conn->ping_timeout && !conn->recv_timeout) { iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of " "zero. Using 5 seconds\n."); conn->recv_timeout = 5; } if (conn->recv_timeout && !conn->ping_timeout) { iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of " "zero. 
Using 5 seconds.\n"); conn->ping_timeout = 5; } spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_STARTED; session->state = ISCSI_STATE_LOGGED_IN; session->queued_cmdsn = session->cmdsn; conn->last_recv = jiffies; conn->last_ping = jiffies; if (conn->recv_timeout && conn->ping_timeout) mod_timer(&conn->transport_timer, jiffies + (conn->recv_timeout * HZ)); switch(conn->stop_stage) { case STOP_CONN_RECOVER: /* * unblock eh_abort() if it is blocked. re-try all * commands after successful recovery */ conn->stop_stage = 0; conn->tmf_state = TMF_INITIAL; session->age++; if (session->age == 16) session->age = 0; break; case STOP_CONN_TERM: conn->stop_stage = 0; break; default: break; } spin_unlock_bh(&session->lock); iscsi_unblock_session(session->cls_session); wake_up(&conn->ehwait); return 0; } EXPORT_SYMBOL_GPL(iscsi_conn_start); static void fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) { struct iscsi_task *task; int i, state; for (i = 0; i < conn->session->cmds_max; i++) { task = conn->session->cmds[i]; if (task->sc) continue; if (task->state == ISCSI_TASK_FREE) continue; ISCSI_DBG_SESSION(conn->session, "failing mgmt itt 0x%x state %d\n", task->itt, task->state); state = ISCSI_TASK_ABRT_SESS_RECOV; if (task->state == ISCSI_TASK_PENDING) state = ISCSI_TASK_COMPLETED; iscsi_complete_task(task, state); } } static void iscsi_start_session_recovery(struct iscsi_session *session, struct iscsi_conn *conn, int flag) { int old_stop_stage; mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); if (conn->stop_stage == STOP_CONN_TERM) { spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); return; } /* * When this is called for the in_login state, we only want to clean * up the login task and connection. 
We do not need to block and set * the recovery state again */ if (flag == STOP_CONN_TERM) session->state = ISCSI_STATE_TERMINATE; else if (conn->stop_stage != STOP_CONN_RECOVER) session->state = ISCSI_STATE_IN_RECOVERY; old_stop_stage = conn->stop_stage; conn->stop_stage = flag; spin_unlock_bh(&session->lock); del_timer_sync(&conn->transport_timer); iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); conn->c_stage = ISCSI_CONN_STOPPED; spin_unlock_bh(&session->lock); /* * for connection level recovery we should not calculate * header digest. conn->hdr_size used for optimization * in hdr_extract() and will be re-negotiated at * set_param() time. */ if (flag == STOP_CONN_RECOVER) { conn->hdrdgst_en = 0; conn->datadgst_en = 0; if (session->state == ISCSI_STATE_IN_RECOVERY && old_stop_stage != STOP_CONN_RECOVER) { ISCSI_DBG_SESSION(session, "blocking session\n"); iscsi_block_session(session->cls_session); } } /* * flush queues. */ spin_lock_bh(&session->lock); fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); fail_mgmt_tasks(session, conn); memset(&conn->tmhdr, 0, sizeof(conn->tmhdr)); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); } void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; switch (flag) { case STOP_CONN_RECOVER: case STOP_CONN_TERM: iscsi_start_session_recovery(session, conn, flag); break; default: iscsi_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", flag); } } EXPORT_SYMBOL_GPL(iscsi_conn_stop); int iscsi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, int is_leading) { struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; spin_lock_bh(&session->lock); if (is_leading) session->leadconn = conn; spin_unlock_bh(&session->lock); /* * Unblock xmitworker(), Login Phase will pass through. 
*/ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); return 0; } EXPORT_SYMBOL_GPL(iscsi_conn_bind); static int iscsi_switch_str_param(char **param, char *new_val_buf) { char *new_val; if (*param) { if (!strcmp(*param, new_val_buf)) return 0; } new_val = kstrdup(new_val_buf, GFP_NOIO); if (!new_val) return -ENOMEM; kfree(*param); *param = new_val; return 0; } int iscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; uint32_t value; switch(param) { case ISCSI_PARAM_FAST_ABORT: sscanf(buf, "%d", &session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: sscanf(buf, "%d", &session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: sscanf(buf, "%d", &session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: sscanf(buf, "%d", &session->tgt_reset_timeout); break; case ISCSI_PARAM_PING_TMO: sscanf(buf, "%d", &conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: sscanf(buf, "%d", &conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: sscanf(buf, "%d", &conn->max_recv_dlength); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: sscanf(buf, "%d", &conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: sscanf(buf, "%d", &conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: sscanf(buf, "%d", &conn->datadgst_en); break; case ISCSI_PARAM_INITIAL_R2T_EN: sscanf(buf, "%d", &session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: sscanf(buf, "%d", &session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: sscanf(buf, "%d", &session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: sscanf(buf, "%d", &session->first_burst); break; case ISCSI_PARAM_MAX_BURST: sscanf(buf, "%d", &session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: sscanf(buf, "%d", &session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: sscanf(buf, "%d", &session->dataseq_inorder_en); 
break; case ISCSI_PARAM_ERL: sscanf(buf, "%d", &session->erl); break; case ISCSI_PARAM_IFMARKER_EN: sscanf(buf, "%d", &value); BUG_ON(value); break; case ISCSI_PARAM_OFMARKER_EN: sscanf(buf, "%d", &value); BUG_ON(value); break; case ISCSI_PARAM_EXP_STATSN: sscanf(buf, "%u", &conn->exp_statsn); break; case ISCSI_PARAM_USERNAME: return iscsi_switch_str_param(&session->username, buf); case ISCSI_PARAM_USERNAME_IN: return iscsi_switch_str_param(&session->username_in, buf); case ISCSI_PARAM_PASSWORD: return iscsi_switch_str_param(&session->password, buf); case ISCSI_PARAM_PASSWORD_IN: return iscsi_switch_str_param(&session->password_in, buf); case ISCSI_PARAM_TARGET_NAME: return iscsi_switch_str_param(&session->targetname, buf); case ISCSI_PARAM_TPGT: sscanf(buf, "%d", &session->tpgt); break; case ISCSI_PARAM_PERSISTENT_PORT: sscanf(buf, "%d", &conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: return iscsi_switch_str_param(&conn->persistent_address, buf); case ISCSI_PARAM_IFACE_NAME: return iscsi_switch_str_param(&session->ifacename, buf); case ISCSI_PARAM_INITIATOR_NAME: return iscsi_switch_str_param(&session->initiatorname, buf); default: return -ENOSYS; } return 0; } EXPORT_SYMBOL_GPL(iscsi_set_param); int iscsi_session_get_param(struct iscsi_cls_session *cls_session, enum iscsi_param param, char *buf) { struct iscsi_session *session = cls_session->dd_data; int len; switch(param) { case ISCSI_PARAM_FAST_ABORT: len = sprintf(buf, "%d\n", session->fast_abort); break; case ISCSI_PARAM_ABORT_TMO: len = sprintf(buf, "%d\n", session->abort_timeout); break; case ISCSI_PARAM_LU_RESET_TMO: len = sprintf(buf, "%d\n", session->lu_reset_timeout); break; case ISCSI_PARAM_TGT_RESET_TMO: len = sprintf(buf, "%d\n", session->tgt_reset_timeout); break; case ISCSI_PARAM_INITIAL_R2T_EN: len = sprintf(buf, "%d\n", session->initial_r2t_en); break; case ISCSI_PARAM_MAX_R2T: len = sprintf(buf, "%hu\n", session->max_r2t); break; case ISCSI_PARAM_IMM_DATA_EN: len = 
sprintf(buf, "%d\n", session->imm_data_en); break; case ISCSI_PARAM_FIRST_BURST: len = sprintf(buf, "%u\n", session->first_burst); break; case ISCSI_PARAM_MAX_BURST: len = sprintf(buf, "%u\n", session->max_burst); break; case ISCSI_PARAM_PDU_INORDER_EN: len = sprintf(buf, "%d\n", session->pdu_inorder_en); break; case ISCSI_PARAM_DATASEQ_INORDER_EN: len = sprintf(buf, "%d\n", session->dataseq_inorder_en); break; case ISCSI_PARAM_ERL: len = sprintf(buf, "%d\n", session->erl); break; case ISCSI_PARAM_TARGET_NAME: len = sprintf(buf, "%s\n", session->targetname); break; case ISCSI_PARAM_TPGT: len = sprintf(buf, "%d\n", session->tpgt); break; case ISCSI_PARAM_USERNAME: len = sprintf(buf, "%s\n", session->username); break; case ISCSI_PARAM_USERNAME_IN: len = sprintf(buf, "%s\n", session->username_in); break; case ISCSI_PARAM_PASSWORD: len = sprintf(buf, "%s\n", session->password); break; case ISCSI_PARAM_PASSWORD_IN: len = sprintf(buf, "%s\n", session->password_in); break; case ISCSI_PARAM_IFACE_NAME: len = sprintf(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", session->initiatorname); break; default: return -ENOSYS; } return len; } EXPORT_SYMBOL_GPL(iscsi_session_get_param); int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; int len; switch(param) { case ISCSI_PARAM_PING_TMO: len = sprintf(buf, "%u\n", conn->ping_timeout); break; case ISCSI_PARAM_RECV_TMO: len = sprintf(buf, "%u\n", conn->recv_timeout); break; case ISCSI_PARAM_MAX_RECV_DLENGTH: len = sprintf(buf, "%u\n", conn->max_recv_dlength); break; case ISCSI_PARAM_MAX_XMIT_DLENGTH: len = sprintf(buf, "%u\n", conn->max_xmit_dlength); break; case ISCSI_PARAM_HDRDGST_EN: len = sprintf(buf, "%d\n", conn->hdrdgst_en); break; case ISCSI_PARAM_DATADGST_EN: len = sprintf(buf, "%d\n", conn->datadgst_en); break; case ISCSI_PARAM_IFMARKER_EN: len = sprintf(buf, "%d\n", 
conn->ifmarker_en); break; case ISCSI_PARAM_OFMARKER_EN: len = sprintf(buf, "%d\n", conn->ofmarker_en); break; case ISCSI_PARAM_EXP_STATSN: len = sprintf(buf, "%u\n", conn->exp_statsn); break; case ISCSI_PARAM_PERSISTENT_PORT: len = sprintf(buf, "%d\n", conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: len = sprintf(buf, "%s\n", conn->persistent_address); break; default: return -ENOSYS; } return len; } EXPORT_SYMBOL_GPL(iscsi_conn_get_param); int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct iscsi_host *ihost = shost_priv(shost); int len; switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: len = sprintf(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: len = sprintf(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", ihost->initiatorname); break; case ISCSI_HOST_PARAM_IPADDRESS: len = sprintf(buf, "%s\n", ihost->local_address); break; default: return -ENOSYS; } return len; } EXPORT_SYMBOL_GPL(iscsi_host_get_param); int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf, int buflen) { struct iscsi_host *ihost = shost_priv(shost); switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: return iscsi_switch_str_param(&ihost->netdev, buf); case ISCSI_HOST_PARAM_HWADDRESS: return iscsi_switch_str_param(&ihost->hwaddress, buf); case ISCSI_HOST_PARAM_INITIATOR_NAME: return iscsi_switch_str_param(&ihost->initiatorname, buf); default: return -ENOSYS; } return 0; } EXPORT_SYMBOL_GPL(iscsi_host_set_param); MODULE_AUTHOR("Mike Christie"); MODULE_DESCRIPTION("iSCSI library functions"); MODULE_LICENSE("GPL");
gpl-2.0
96boards-bubblegum/linux
drivers/hid/hid-picolcd_lcd.c
938
3530
/*************************************************************************** * Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org> * * * * Based on Logitech G13 driver (v0.4) * * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, version 2 of the License. * * * * This driver is distributed in the hope that it will be useful, but * * WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * * General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this software. If not see <http://www.gnu.org/licenses/>. * ***************************************************************************/ #include <linux/hid.h> #include <linux/fb.h> #include <linux/lcd.h> #include "hid-picolcd.h" /* * lcd class device */ static int picolcd_get_contrast(struct lcd_device *ldev) { struct picolcd_data *data = lcd_get_data(ldev); return data->lcd_contrast; } static int picolcd_set_contrast(struct lcd_device *ldev, int contrast) { struct picolcd_data *data = lcd_get_data(ldev); struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev); unsigned long flags; if (!report || report->maxfield != 1 || report->field[0]->report_count != 1) return -ENODEV; data->lcd_contrast = contrast & 0x0ff; spin_lock_irqsave(&data->lock, flags); hid_set_field(report->field[0], 0, data->lcd_contrast); if (!(data->status & PICOLCD_FAILED)) hid_hw_request(data->hdev, report, HID_REQ_SET_REPORT); spin_unlock_irqrestore(&data->lock, flags); return 0; } static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb) { return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev)); } static struct lcd_ops picolcd_lcdops = { 
.get_contrast = picolcd_get_contrast, .set_contrast = picolcd_set_contrast, .check_fb = picolcd_check_lcd_fb, }; int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report) { struct device *dev = &data->hdev->dev; struct lcd_device *ldev; if (!report) return -ENODEV; if (report->maxfield != 1 || report->field[0]->report_count != 1 || report->field[0]->report_size != 8) { dev_err(dev, "unsupported CONTRAST report"); return -EINVAL; } ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops); if (IS_ERR(ldev)) { dev_err(dev, "failed to register LCD\n"); return PTR_ERR(ldev); } ldev->props.max_contrast = 0x0ff; data->lcd_contrast = 0xe5; data->lcd = ldev; picolcd_set_contrast(ldev, 0xe5); return 0; } void picolcd_exit_lcd(struct picolcd_data *data) { struct lcd_device *ldev = data->lcd; data->lcd = NULL; lcd_device_unregister(ldev); } int picolcd_resume_lcd(struct picolcd_data *data) { if (!data->lcd) return 0; return picolcd_set_contrast(data->lcd, data->lcd_contrast); }
gpl-2.0
codename13/android_kernel_ba2x_2.0
drivers/block/xen-blkback/blkback.c
1194
22503
/****************************************************************************** * * Back-end of the driver for virtual block devices. This portion of the * driver exports a 'unified' block-device interface that can be accessed * by any operating system that implements a compatible front end. A * reference front-end implementation can be found in: * drivers/block/xen-blkfront.c * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/freezer.h> #include <xen/events.h> #include <xen/page.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include "common.h" /* * These are rather arbitrary. They are fairly large because adjacent requests * pulled from a communication ring are quite likely to end up being part of * the same scatter/gather request at the disc. * * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW ** * * This will increase the chances of being able to write whole tracks. * 64 should be enough to keep us competitive with Linux. */ static int xen_blkif_reqs = 64; module_param_named(reqs, xen_blkif_reqs, int, 0); MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate"); /* Run-time switchable: /sys/module/blkback/parameters/ */ static unsigned int log_stats; module_param(log_stats, int, 0644); /* * Each outstanding request that we've passed to the lower device layers has a * 'pending_req' allocated to it. Each buffer_head that completes decrements * the pendcnt towards zero. When it hits zero, the specified domain has a * response queued for it, with the saved 'id' passed back. */ struct pending_req { struct xen_blkif *blkif; u64 id; int nr_pages; atomic_t pendcnt; unsigned short operation; int status; struct list_head free_list; }; #define BLKBACK_INVALID_HANDLE (~0) struct xen_blkbk { struct pending_req *pending_reqs; /* List of all 'pending_req' available */ struct list_head pending_free; /* And its spinlock. */ spinlock_t pending_free_lock; wait_queue_head_t pending_free_wq; /* The list of all pages that are available. */ struct page **pending_pages; /* And the grant handles that are available. */ grant_handle_t *pending_grant_handles; }; static struct xen_blkbk *blkbk; /* * Little helpful macro to figure out the index and virtual address of the * pending_pages[..]. 
For each 'pending_req' we have have up to * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through * 10 and would index in the pending_pages[..]. */ static inline int vaddr_pagenr(struct pending_req *req, int seg) { return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg; } #define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)] static inline unsigned long vaddr(struct pending_req *req, int seg) { unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg)); return (unsigned long)pfn_to_kaddr(pfn); } #define pending_handle(_req, _seg) \ (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)]) static int do_block_io_op(struct xen_blkif *blkif); static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req); static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st); /* * Retrieve from the 'pending_reqs' a free pending_req structure to be used. */ static struct pending_req *alloc_req(void) { struct pending_req *req = NULL; unsigned long flags; spin_lock_irqsave(&blkbk->pending_free_lock, flags); if (!list_empty(&blkbk->pending_free)) { req = list_entry(blkbk->pending_free.next, struct pending_req, free_list); list_del(&req->free_list); } spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); return req; } /* * Return the 'pending_req' structure back to the freepool. We also * wake up the thread if it was waiting for a free page. */ static void free_req(struct pending_req *req) { unsigned long flags; int was_empty; spin_lock_irqsave(&blkbk->pending_free_lock, flags); was_empty = list_empty(&blkbk->pending_free); list_add(&req->free_list, &blkbk->pending_free); spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); if (was_empty) wake_up(&blkbk->pending_free_wq); } /* * Routines for managing virtual block devices (vbds). 
*/ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, int operation) { struct xen_vbd *vbd = &blkif->vbd; int rc = -EACCES; if ((operation != READ) && vbd->readonly) goto out; if (likely(req->nr_sects)) { blkif_sector_t end = req->sector_number + req->nr_sects; if (unlikely(end < req->sector_number)) goto out; if (unlikely(end > vbd_sz(vbd))) goto out; } req->dev = vbd->pdevice; req->bdev = vbd->bdev; rc = 0; out: return rc; } static void xen_vbd_resize(struct xen_blkif *blkif) { struct xen_vbd *vbd = &blkif->vbd; struct xenbus_transaction xbt; int err; struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); unsigned long long new_size = vbd_sz(vbd); pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n", blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size); vbd->size = new_size; again: err = xenbus_transaction_start(&xbt); if (err) { pr_warn(DRV_PFX "Error starting transaction"); return; } err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", (unsigned long long)vbd_sz(vbd)); if (err) { pr_warn(DRV_PFX "Error writing new size"); goto abort; } /* * Write the current state; we will use this to synchronize * the front-end. If the current state is "connected" the * front-end will get the new size information online. */ err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state); if (err) { pr_warn(DRV_PFX "Error writing the state"); goto abort; } err = xenbus_transaction_end(xbt, 0); if (err == -EAGAIN) goto again; if (err) pr_warn(DRV_PFX "Error ending transaction"); return; abort: xenbus_transaction_end(xbt, 1); } /* * Notification from the guest OS. 
*/ static void blkif_notify_work(struct xen_blkif *blkif) { blkif->waiting_reqs = 1; wake_up(&blkif->wq); } irqreturn_t xen_blkif_be_int(int irq, void *dev_id) { blkif_notify_work(dev_id); return IRQ_HANDLED; } /* * SCHEDULER FUNCTIONS */ static void print_stats(struct xen_blkif *blkif) { pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n", current->comm, blkif->st_oo_req, blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req); blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); blkif->st_rd_req = 0; blkif->st_wr_req = 0; blkif->st_oo_req = 0; } int xen_blkif_schedule(void *arg) { struct xen_blkif *blkif = arg; struct xen_vbd *vbd = &blkif->vbd; xen_blkif_get(blkif); while (!kthread_should_stop()) { if (try_to_freeze()) continue; if (unlikely(vbd->size != vbd_sz(vbd))) xen_vbd_resize(blkif); wait_event_interruptible( blkif->wq, blkif->waiting_reqs || kthread_should_stop()); wait_event_interruptible( blkbk->pending_free_wq, !list_empty(&blkbk->pending_free) || kthread_should_stop()); blkif->waiting_reqs = 0; smp_mb(); /* clear flag *before* checking for work */ if (do_block_io_op(blkif)) blkif->waiting_reqs = 1; if (log_stats && time_after(jiffies, blkif->st_print)) print_stats(blkif); } if (log_stats) print_stats(blkif); blkif->xenblkd = NULL; xen_blkif_put(blkif); return 0; } struct seg_buf { unsigned long buf; unsigned int nsec; }; /* * Unmap the grant references, and also remove the M2P over-rides * used in the 'pending_req'. 
*/ static void xen_blkbk_unmap(struct pending_req *req) { struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int i, invcount = 0; grant_handle_t handle; int ret; for (i = 0; i < req->nr_pages; i++) { handle = pending_handle(req, i); if (handle == BLKBACK_INVALID_HANDLE) continue; gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), GNTMAP_host_map, handle); pending_handle(req, i) = BLKBACK_INVALID_HANDLE; invcount++; } ret = HYPERVISOR_grant_table_op( GNTTABOP_unmap_grant_ref, unmap, invcount); BUG_ON(ret); /* * Note, we use invcount, so nr->pages, so we can't index * using vaddr(req, i). */ for (i = 0; i < invcount; i++) { ret = m2p_remove_override( virt_to_page(unmap[i].host_addr), false); if (ret) { pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n", (unsigned long)unmap[i].host_addr); continue; } } } static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_req, struct seg_buf seg[]) { struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; int i; int nseg = req->nr_segments; int ret = 0; /* * Fill out preq.nr_sects with proper amount of sectors, and setup * assign map[..] with the PFN of the page in our domain with the * corresponding grant reference for each page. */ for (i = 0; i < nseg; i++) { uint32_t flags; flags = GNTMAP_host_map; if (pending_req->operation != BLKIF_OP_READ) flags |= GNTMAP_readonly; gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, req->u.rw.seg[i].gref, pending_req->blkif->domid); } ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg); BUG_ON(ret); /* * Now swizzle the MFN in our domain with the MFN from the other domain * so that when we access vaddr(pending_req,i) it has the contents of * the page from the other domain. 
*/ for (i = 0; i < nseg; i++) { if (unlikely(map[i].status != 0)) { pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); map[i].handle = BLKBACK_INVALID_HANDLE; ret |= 1; } pending_handle(pending_req, i) = map[i].handle; if (ret) continue; ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr), blkbk->pending_page(pending_req, i), false); if (ret) { pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n", (unsigned long)map[i].dev_bus_addr, ret); /* We could switch over to GNTTABOP_copy */ continue; } seg[i].buf = map[i].dev_bus_addr | (req->u.rw.seg[i].first_sect << 9); } return ret; } /* * Completion callback on the bio's. Called as bh->b_end_io() */ static void __end_block_io_op(struct pending_req *pending_req, int error) { /* An error fails the entire request. */ if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && (error == -EOPNOTSUPP)) { pr_debug(DRV_PFX "flush diskcache op failed, not supported\n"); xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if (error) { pr_debug(DRV_PFX "Buffer not up-to-date at end of operation," " error=%d\n", error); pending_req->status = BLKIF_RSP_ERROR; } /* * If all of the bio's have completed it is time to unmap * the grant references associated with 'request' and provide * the proper response on the ring. */ if (atomic_dec_and_test(&pending_req->pendcnt)) { xen_blkbk_unmap(pending_req); make_response(pending_req->blkif, pending_req->id, pending_req->operation, pending_req->status); xen_blkif_put(pending_req->blkif); free_req(pending_req); } } /* * bio callback. */ static void end_block_io_op(struct bio *bio, int error) { __end_block_io_op(bio->bi_private, error); bio_put(bio); } /* * Function to copy the from the ring buffer the 'struct blkif_request' * (which has the sectors we want, number of them, grant references, etc), * and transmute it to the block API to hand it over to the proper block disk. 
*/ static int do_block_io_op(struct xen_blkif *blkif) { union blkif_back_rings *blk_rings = &blkif->blk_rings; struct blkif_request req; struct pending_req *pending_req; RING_IDX rc, rp; int more_to_do = 0; rc = blk_rings->common.req_cons; rp = blk_rings->common.sring->req_prod; rmb(); /* Ensure we see queued requests up to 'rp'. */ while (rc != rp) { if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) break; if (kthread_should_stop()) { more_to_do = 1; break; } pending_req = alloc_req(); if (NULL == pending_req) { blkif->st_oo_req++; more_to_do = 1; break; } switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req)); break; case BLKIF_PROTOCOL_X86_32: blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc)); break; case BLKIF_PROTOCOL_X86_64: blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc)); break; default: BUG(); } blk_rings->common.req_cons = ++rc; /* before make_response() */ /* Apply all sanity checks to /private copy/ of request. */ barrier(); if (dispatch_rw_block_io(blkif, &req, pending_req)) break; /* Yield point for this unbounded loop. */ cond_resched(); } return more_to_do; } /* * Transmutation of the 'struct blkif_request' to a proper 'struct bio' * and call the 'submit_bio' to pass it to the underlying storage. 
*/ static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { struct phys_req preq; struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; unsigned int nseg; struct bio *bio = NULL; struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; int i, nbio = 0; int operation; struct blk_plug plug; switch (req->operation) { case BLKIF_OP_READ: blkif->st_rd_req++; operation = READ; break; case BLKIF_OP_WRITE: blkif->st_wr_req++; operation = WRITE_ODIRECT; break; case BLKIF_OP_FLUSH_DISKCACHE: blkif->st_f_req++; operation = WRITE_FLUSH; break; case BLKIF_OP_WRITE_BARRIER: default: operation = 0; /* make gcc happy */ goto fail_response; break; } /* Check that the number of segments is sane. */ nseg = req->nr_segments; if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", nseg); /* Haven't submitted any bio's yet. */ goto fail_response; } preq.dev = req->handle; preq.sector_number = req->u.rw.sector_number; preq.nr_sects = 0; pending_req->blkif = blkif; pending_req->id = req->id; pending_req->operation = req->operation; pending_req->status = BLKIF_RSP_OKAY; pending_req->nr_pages = nseg; for (i = 0; i < nseg; i++) { seg[i].nsec = req->u.rw.seg[i].last_sect - req->u.rw.seg[i].first_sect + 1; if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) goto fail_response; preq.nr_sects += seg[i].nsec; } if (xen_vbd_translate(&preq, blkif, operation) != 0) { pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", operation == READ ? "read" : "write", preq.sector_number, preq.sector_number + preq.nr_sects, preq.dev); goto fail_response; } /* * This check _MUST_ be done after xen_vbd_translate as the preq.bdev * is set there. 
*/ for (i = 0; i < nseg; i++) { if (((int)preq.sector_number|(int)seg[i].nsec) & ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { pr_debug(DRV_PFX "Misaligned I/O request from domain %d", blkif->domid); goto fail_response; } } /* * If we have failed at this point, we need to undo the M2P override, * set gnttab_set_unmap_op on all of the grant references and perform * the hypercall to unmap the grants - that is all done in * xen_blkbk_unmap. */ if (xen_blkbk_map(req, pending_req, seg)) goto fail_flush; /* This corresponding xen_blkif_put is done in __end_block_io_op */ xen_blkif_get(blkif); for (i = 0; i < nseg; i++) { while ((bio == NULL) || (bio_add_page(bio, blkbk->pending_page(pending_req, i), seg[i].nsec << 9, seg[i].buf & ~PAGE_MASK) == 0)) { bio = bio_alloc(GFP_KERNEL, nseg-i); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_sector = preq.sector_number; } preq.sector_number += seg[i].nsec; } /* This will be hit if the operation was a flush. */ if (!bio) { BUG_ON(operation != WRITE_FLUSH); bio = bio_alloc(GFP_KERNEL, 0); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; } /* * We set it one so that the last submit_bio does not have to call * atomic_inc. */ atomic_set(&pending_req->pendcnt, nbio); /* Get a reference count for the disk queue and start sending I/O */ blk_start_plug(&plug); for (i = 0; i < nbio; i++) submit_bio(operation, biolist[i]); /* Let the I/Os go.. */ blk_finish_plug(&plug); if (operation == READ) blkif->st_rd_sect += preq.nr_sects; else if (operation & WRITE) blkif->st_wr_sect += preq.nr_sects; return 0; fail_flush: xen_blkbk_unmap(pending_req); fail_response: /* Haven't submitted any bio's yet. 
*/ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); free_req(pending_req); msleep(1); /* back off a bit */ return -EIO; fail_put_bio: for (i = 0; i < nbio; i++) bio_put(biolist[i]); __end_block_io_op(pending_req, -EINVAL); msleep(1); /* back off a bit */ return -EIO; } /* * Put a response on the ring on how the operation fared. */ static void make_response(struct xen_blkif *blkif, u64 id, unsigned short op, int st) { struct blkif_response resp; unsigned long flags; union blkif_back_rings *blk_rings = &blkif->blk_rings; int more_to_do = 0; int notify; resp.id = id; resp.operation = op; resp.status = st; spin_lock_irqsave(&blkif->blk_ring_lock, flags); /* Place on the response ring for the relevant domain. */ switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), &resp, sizeof(resp)); break; case BLKIF_PROTOCOL_X86_32: memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), &resp, sizeof(resp)); break; case BLKIF_PROTOCOL_X86_64: memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), &resp, sizeof(resp)); break; default: BUG(); } blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) { /* * Tail check for pending requests. Allows frontend to avoid * notifications if requests are already in flight (lower * overheads and promotes batching). 
*/ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do); } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) { more_to_do = 1; } spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); if (more_to_do) blkif_notify_work(blkif); if (notify) notify_remote_via_irq(blkif->irq); } static int __init xen_blkif_init(void) { int i, mmap_pages; int rc = 0; if (!xen_pv_domain()) return -ENODEV; blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL); if (!blkbk) { pr_alert(DRV_PFX "%s: out of memory!\n", __func__); return -ENOMEM; } mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) * xen_blkif_reqs, GFP_KERNEL); blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) * mmap_pages, GFP_KERNEL); blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) * mmap_pages, GFP_KERNEL); if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || !blkbk->pending_pages) { rc = -ENOMEM; goto out_of_memory; } for (i = 0; i < mmap_pages; i++) { blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE; blkbk->pending_pages[i] = alloc_page(GFP_KERNEL); if (blkbk->pending_pages[i] == NULL) { rc = -ENOMEM; goto out_of_memory; } } rc = xen_blkif_interface_init(); if (rc) goto failed_init; memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs)); INIT_LIST_HEAD(&blkbk->pending_free); spin_lock_init(&blkbk->pending_free_lock); init_waitqueue_head(&blkbk->pending_free_wq); for (i = 0; i < xen_blkif_reqs; i++) list_add_tail(&blkbk->pending_reqs[i].free_list, &blkbk->pending_free); rc = xen_blkif_xenbus_init(); if (rc) goto failed_init; return 0; out_of_memory: pr_alert(DRV_PFX "%s: out of memory\n", __func__); failed_init: kfree(blkbk->pending_reqs); kfree(blkbk->pending_grant_handles); if (blkbk->pending_pages) { for (i = 0; i < mmap_pages; i++) { if (blkbk->pending_pages[i]) __free_page(blkbk->pending_pages[i]); } kfree(blkbk->pending_pages); } kfree(blkbk); blkbk = NULL; return 
rc; } module_init(xen_blkif_init); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
zefie/thunderc_kernel_xionia
arch/sparc/prom/bootstr_32.c
1706
1207
/* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <asm/oplib.h> #include <linux/init.h> #define BARG_LEN 256 static char barg_buf[BARG_LEN] = { 0 }; static char fetched __initdata = 0; char * __init prom_getbootargs(void) { int iter; char *cp, *arg; /* This check saves us from a panic when bootfd patches args. */ if (fetched) { return barg_buf; } switch(prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ for(iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if(arg == 0) break; while(*arg != 0) { /* Leave place for space and null. */ if(cp >= barg_buf + BARG_LEN-2){ /* We might issue a warning here. */ break; } *cp++ = *arg++; } *cp++ = ' '; } *cp = 0; break; case PROM_V2: case PROM_V3: /* * V3 PROM cannot supply as with more than 128 bytes * of an argument. But a smart bootstrap loader can. */ strlcpy(barg_buf, *romvec->pv_v2bootargs.bootargs, sizeof(barg_buf)); break; default: break; } fetched = 1; return barg_buf; }
gpl-2.0
kimjh4930/linuxkernel
drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
1962
58937
/****************************************************************************** * * Copyright(c) 2009-2013 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "../base.h" #include "../pci.h" #include "reg.h" #include "def.h" #include "phy.h" #include "dm.h" #include "fw.h" #include "trx.h" static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 0x7f8001fe, /* 0, +6.0dB */ 0x788001e2, /* 1, +5.5dB */ 0x71c001c7, /* 2, +5.0dB */ 0x6b8001ae, /* 3, +4.5dB */ 0x65400195, /* 4, +4.0dB */ 0x5fc0017f, /* 5, +3.5dB */ 0x5a400169, /* 6, +3.0dB */ 0x55400155, /* 7, +2.5dB */ 0x50800142, /* 8, +2.0dB */ 0x4c000130, /* 9, +1.5dB */ 0x47c0011f, /* 10, +1.0dB */ 0x43c0010f, /* 11, +0.5dB */ 0x40000100, /* 12, +0dB */ 0x3c8000f2, /* 13, -0.5dB */ 0x390000e4, /* 14, -1.0dB */ 0x35c000d7, /* 15, -1.5dB */ 0x32c000cb, /* 16, -2.0dB */ 0x300000c0, /* 17, -2.5dB */ 0x2d4000b5, /* 18, -3.0dB */ 0x2ac000ab, /* 19, -3.5dB */ 0x288000a2, /* 20, -4.0dB */ 0x26000098, /* 21, -4.5dB */ 0x24000090, /* 22, -5.0dB */ 0x22000088, /* 23, -5.5dB */ 0x20000080, /* 24, -6.0dB */ 0x1e400079, /* 25, -6.5dB */ 0x1c800072, /* 26, -7.0dB */ 0x1b00006c, /* 27. 
-7.5dB */ 0x19800066, /* 28, -8.0dB */ 0x18000060, /* 29, -8.5dB */ 0x16c0005b, /* 30, -9.0dB */ 0x15800056, /* 31, -9.5dB */ 0x14400051, /* 32, -10.0dB */ 0x1300004c, /* 33, -10.5dB */ 0x12000048, /* 34, -11.0dB */ 0x11000044, /* 35, -11.5dB */ 0x10000040, /* 36, -12.0dB */ 0x0f00003c, /* 37, -12.5dB */ 0x0e400039, /* 38, -13.0dB */ 0x0d800036, /* 39, -13.5dB */ 0x0cc00033, /* 40, -14.0dB */ 0x0c000030, /* 41, -14.5dB */ 0x0b40002d, /* 42, -15.0dB */ }; static const u8 cck_tbl_ch1_13[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB*/ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB*/ {0x0f, 0x0f, 0x0d, 0x0b, 
0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB*/ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB*/ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB*/ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB*/ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB*/ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB*/ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB*/ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB*/ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB*/ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB*/ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB*/ }; static const u8 cck_tbl_ch14[CCK_TABLE_SIZE][8] = { {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 
0x00, 0x00}, /* 18, -9.0dB */ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB*/ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB*/ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB*/ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB*/ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB*/ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB*/ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB*/ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB*/ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB*/ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB*/ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB*/ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB*/ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB*/ }; #define CAL_SWING_OFF(_off, _dir, _size, _del) \ do { \ for (_off = 0; _off < _size; _off++) { \ if (_del < thermal_threshold[_dir][_off]) { \ if (_off != 0) \ _off--; \ break; \ } \ } \ if (_off >= _size) \ _off = _size - 1; \ } while (0) static void rtl88e_set_iqk_matrix(struct ieee80211_hw *hw, u8 ofdm_index, u8 rfpath, long iqk_result_x, long iqk_result_y) { long ele_a = 0, ele_d, ele_c = 0, value32; ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000)>>22; if (iqk_result_x != 0) { if ((iqk_result_x & 0x00000200) != 0) iqk_result_x = iqk_result_x | 0xFFFFFC00; ele_a = ((iqk_result_x * ele_d)>>8)&0x000003FF; if ((iqk_result_y & 0x00000200) != 0) iqk_result_y = iqk_result_y | 0xFFFFFC00; ele_c = ((iqk_result_y * ele_d)>>8)&0x000003FF; switch (rfpath) { case RF90_PATH_A: value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32); value32 = ((iqk_result_x * ele_d) >> 7) & 
0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(24), value32); break; case RF90_PATH_B: value32 = (ele_d << 22)|((ele_c & 0x3F)<<16) | ele_a; rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBAL, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32); value32 = ((iqk_result_x * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(28), value32); break; default: break; } } else { switch (rfpath) { case RF90_PATH_A: rtl_set_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD, ofdmswing_table[ofdm_index]); rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(24), 0x00); break; case RF90_PATH_B: rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBAL, MASKDWORD, ofdmswing_table[ofdm_index]); rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRES, BIT(28), 0x00); break; default: break; } } } void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type, u8 *pdirection, u32 *poutwrite_val) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); u8 pwr_val = 0; u8 cck_base = rtldm->swing_idx_cck_base; u8 cck_val = rtldm->swing_idx_cck; u8 ofdm_base = rtldm->swing_idx_ofdm_base; u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A]; if (type == 0) { if (ofdm_val <= ofdm_base) { *pdirection = 1; pwr_val = ofdm_base - ofdm_val; } else { *pdirection = 2; pwr_val = ofdm_val - ofdm_base; } } else if (type == 1) { if (cck_val <= cck_base) { *pdirection = 1; pwr_val = cck_base - cck_val; } else { *pdirection = 2; pwr_val = cck_val - cck_base; } } if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1)) pwr_val = TXPWRTRACK_MAX_IDX; *poutwrite_val = pwr_val | (pwr_val << 8) | (pwr_val << 16) | (pwr_val << 24); } static void rtl88e_chk_tx_track(struct ieee80211_hw *hw, enum pwr_track_control_method method, u8 rfpath, u8 index) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); 
int jj = rtldm->swing_idx_cck; int i; if (method == TXAGC) { if (rtldm->swing_flag_ofdm == true || rtldm->swing_flag_cck == true) { u8 chan = rtlphy->current_channel; rtl88e_phy_set_txpower_level(hw, chan); rtldm->swing_flag_ofdm = false; rtldm->swing_flag_cck = false; } } else if (method == BBSWING) { if (!rtldm->cck_inch14) { for (i = 0; i < 8; i++) rtl_write_byte(rtlpriv, 0xa22 + i, cck_tbl_ch1_13[jj][i]); } else { for (i = 0; i < 8; i++) rtl_write_byte(rtlpriv, 0xa22 + i, cck_tbl_ch14[jj][i]); } if (rfpath == RF90_PATH_A) { long x = rtlphy->iqk_matrix[index].value[0][0]; long y = rtlphy->iqk_matrix[index].value[0][1]; u8 indx = rtldm->swing_idx_ofdm[rfpath]; rtl88e_set_iqk_matrix(hw, indx, rfpath, x, y); } else if (rfpath == RF90_PATH_B) { u8 indx = rtldm->swing_idx_ofdm[rfpath]; long x = rtlphy->iqk_matrix[indx].value[0][4]; long y = rtlphy->iqk_matrix[indx].value[0][5]; rtl88e_set_iqk_matrix(hw, indx, rfpath, x, y); } } else { return; } } static void rtl88e_dm_diginit(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; dm_dig->dig_enable_flag = true; dm_dig->cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); dm_dig->pre_igvalue = 0; dm_dig->cursta_cstate = DIG_STA_DISCONNECT; dm_dig->presta_cstate = DIG_STA_DISCONNECT; dm_dig->curmultista_cstate = DIG_MULTISTA_DISCONNECT; dm_dig->rssi_lowthresh = DM_DIG_THRESH_LOW; dm_dig->rssi_highthresh = DM_DIG_THRESH_HIGH; dm_dig->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; dm_dig->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; dm_dig->rx_gain_max = DM_DIG_MAX; dm_dig->rx_gain_min = DM_DIG_MIN; dm_dig->back_val = DM_DIG_BACKOFF_DEFAULT; dm_dig->back_range_max = DM_DIG_BACKOFF_MAX; dm_dig->back_range_min = DM_DIG_BACKOFF_MIN; dm_dig->pre_cck_cca_thres = 0xff; dm_dig->cur_cck_cca_thres = 0x83; dm_dig->forbidden_igi = DM_DIG_MIN; dm_dig->large_fa_hit = 0; dm_dig->recover_cnt = 0; dm_dig->dig_min_0 = 0x25; dm_dig->dig_min_1 = 0x25; dm_dig->media_connect_0 = false; 
dm_dig->media_connect_1 = false; rtlpriv->dm.dm_initialgain_enable = true; } static u8 rtl88e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; long rssi_val_min = 0; if ((dm_dig->curmultista_cstate == DIG_MULTISTA_CONNECT) && (dm_dig->cursta_cstate == DIG_STA_CONNECT)) { if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0) rssi_val_min = (rtlpriv->dm.entry_min_undec_sm_pwdb > rtlpriv->dm.undec_sm_pwdb) ? rtlpriv->dm.undec_sm_pwdb : rtlpriv->dm.entry_min_undec_sm_pwdb; else rssi_val_min = rtlpriv->dm.undec_sm_pwdb; } else if (dm_dig->cursta_cstate == DIG_STA_CONNECT || dm_dig->cursta_cstate == DIG_STA_BEFORE_CONNECT) { rssi_val_min = rtlpriv->dm.undec_sm_pwdb; } else if (dm_dig->curmultista_cstate == DIG_MULTISTA_CONNECT) { rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb; } return (u8)rssi_val_min; } static void rtl88e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) { u32 ret_value; struct rtl_priv *rtlpriv = rtl_priv(hw); struct false_alarm_statistics *alm_cnt = &(rtlpriv->falsealm_cnt); rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); alm_cnt->cnt_fast_fsync_fail = (ret_value&0xffff); alm_cnt->cnt_sb_search_fail = ((ret_value&0xffff0000)>>16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); alm_cnt->cnt_ofdm_cca = (ret_value&0xffff); alm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); alm_cnt->cnt_rate_illegal = (ret_value & 0xffff); alm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); alm_cnt->cnt_mcs_fail = (ret_value & 0xffff); alm_cnt->cnt_ofdm_fail = alm_cnt->cnt_parity_fail + alm_cnt->cnt_rate_illegal + alm_cnt->cnt_crc8_fail + alm_cnt->cnt_mcs_fail + alm_cnt->cnt_fast_fsync_fail + alm_cnt->cnt_sb_search_fail; 
ret_value = rtl_get_bbreg(hw, REG_SC_CNT, MASKDWORD); alm_cnt->cnt_bw_lsc = (ret_value & 0xffff); alm_cnt->cnt_bw_usc = ((ret_value & 0xffff0000) >> 16); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(12), 1); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); alm_cnt->cnt_cck_fail = ret_value; ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); alm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; ret_value = rtl_get_bbreg(hw, RCCK0_CCA_CNT, MASKDWORD); alm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) | ((ret_value&0xFF00)>>8); alm_cnt->cnt_all = alm_cnt->cnt_fast_fsync_fail + alm_cnt->cnt_sb_search_fail + alm_cnt->cnt_parity_fail + alm_cnt->cnt_rate_illegal + alm_cnt->cnt_crc8_fail + alm_cnt->cnt_mcs_fail + alm_cnt->cnt_cck_fail; alm_cnt->cnt_cca_all = alm_cnt->cnt_ofdm_cca + alm_cnt->cnt_cck_cca; rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 1); rtl_set_bbreg(hw, ROFDM0_TRSWISOLATION, BIT(31), 0); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(27), 1); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(27), 0); rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0); rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(13)|BIT(12), 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(13)|BIT(12), 2); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 0); rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 2); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_parity_fail = %d, cnt_rate_illegal = %d, " "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", alm_cnt->cnt_parity_fail, alm_cnt->cnt_rate_illegal, alm_cnt->cnt_crc8_fail, alm_cnt->cnt_mcs_fail); RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", alm_cnt->cnt_ofdm_fail, alm_cnt->cnt_cck_fail, alm_cnt->cnt_all); } static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; u8 
	   cur_cck_cca_thresh;

	if (dm_dig->cursta_cstate == DIG_STA_CONNECT) {
		dm_dig->rssi_val_min = rtl88e_dm_initial_gain_min_pwdb(hw);
		if (dm_dig->rssi_val_min > 25) {
			cur_cck_cca_thresh = 0xcd;
		} else if ((dm_dig->rssi_val_min <= 25) &&
			   (dm_dig->rssi_val_min > 10)) {
			cur_cck_cca_thresh = 0x83;
		} else {
			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
				cur_cck_cca_thresh = 0x83;
			else
				cur_cck_cca_thresh = 0x40;
		}
	} else {
		if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
			cur_cck_cca_thresh = 0x83;
		else
			cur_cck_cca_thresh = 0x40;
	}

	if (dm_dig->cur_cck_cca_thres != cur_cck_cca_thresh)
		rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);

	dm_dig->cur_cck_cca_thres = cur_cck_cca_thresh;
	dm_dig->pre_cck_cca_thres = dm_dig->cur_cck_cca_thres;
	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "CCK cca thresh hold =%x\n", dm_dig->cur_cck_cca_thres);
}

/*
 * Dynamic initial-gain (DIG) control.  Derives the allowed IGI window
 * from the minimum PWDB and link state, raises a "forbidden IGI" floor
 * while false alarms are heavy (with a 3600-tick recovery countdown),
 * steps the current IGI against the false-alarm thresholds, clamps it
 * to [DM_DIG_FA_LOWER, DM_DIG_FA_UPPER] and writes it via
 * rtl88e_dm_write_dig().  Also refreshes the CCK CCA threshold.
 */
static void rtl88e_dm_dig(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_dig = &rtlpriv->dm_digtable;
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 dig_min, dig_maxofmin;
	bool bfirstconnect;
	u8 dm_dig_max, dm_dig_min;
	u8 current_igi = dm_dig->cur_igvalue;

	if (rtlpriv->dm.dm_initialgain_enable == false)
		return;
	if (dm_dig->dig_enable_flag == false)
		return;
	if (mac->act_scanning == true)
		return;

	if (mac->link_state >= MAC80211_LINKED)
		dm_dig->cursta_cstate = DIG_STA_CONNECT;
	else
		dm_dig->cursta_cstate = DIG_STA_DISCONNECT;
	if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
	    rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
		dm_dig->cursta_cstate = DIG_STA_DISCONNECT;

	dm_dig_max = DM_DIG_MAX;
	dm_dig_min = DM_DIG_MIN;
	dig_maxofmin = DM_DIG_MAX_AP;
	dig_min = dm_dig->dig_min_0;
	bfirstconnect = ((mac->link_state >= MAC80211_LINKED) ?
			 true : false) &&
			 (dm_dig->media_connect_0 == false);

	dm_dig->rssi_val_min = rtl88e_dm_initial_gain_min_pwdb(hw);

	if (mac->link_state >= MAC80211_LINKED) {
		/* gain window follows RSSI: max = rssi_min + 20, clamped */
		if ((dm_dig->rssi_val_min + 20) > dm_dig_max)
			dm_dig->rx_gain_max = dm_dig_max;
		else if ((dm_dig->rssi_val_min + 20) < dm_dig_min)
			dm_dig->rx_gain_max = dm_dig_min;
		else
			dm_dig->rx_gain_max = dm_dig->rssi_val_min + 20;

		if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) {
			dig_min = dm_dig->antdiv_rssi_max;
		} else {
			if (dm_dig->rssi_val_min < dm_dig_min)
				dig_min = dm_dig_min;
			else if (dm_dig->rssi_val_min < dig_maxofmin)
				dig_min = dig_maxofmin;
			else
				dig_min = dm_dig->rssi_val_min;
		}
	} else {
		dm_dig->rx_gain_max = dm_dig_max;
		dig_min = dm_dig_min;
		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
	}

	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
		/* heavy false alarms: raise the forbidden-IGI floor */
		dm_dig->large_fa_hit++;
		if (dm_dig->forbidden_igi < current_igi) {
			dm_dig->forbidden_igi = current_igi;
			dm_dig->large_fa_hit = 1;
		}
		if (dm_dig->large_fa_hit >= 3) {
			if ((dm_dig->forbidden_igi + 1) > dm_dig->rx_gain_max)
				dm_dig->rx_gain_min = dm_dig->rx_gain_max;
			else
				dm_dig->rx_gain_min =
					dm_dig->forbidden_igi + 1;
			dm_dig->recover_cnt = 3600;
		}
	} else {
		/* quiet: slowly relax the floor once recovery expires */
		if (dm_dig->recover_cnt != 0) {
			dm_dig->recover_cnt--;
		} else {
			if (dm_dig->large_fa_hit == 0) {
				if ((dm_dig->forbidden_igi - 1) < dig_min) {
					dm_dig->forbidden_igi = dig_min;
					dm_dig->rx_gain_min = dig_min;
				} else {
					dm_dig->forbidden_igi--;
					dm_dig->rx_gain_min =
						dm_dig->forbidden_igi + 1;
				}
			} else if (dm_dig->large_fa_hit == 3) {
				dm_dig->large_fa_hit = 0;
			}
		}
	}

	if (dm_dig->cursta_cstate == DIG_STA_CONNECT) {
		if (bfirstconnect) {
			current_igi = dm_dig->rssi_val_min;
		} else {
			if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
				current_igi += 2;
			else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
				current_igi++;
			else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
				current_igi--;
		}
	} else {
		if (rtlpriv->falsealm_cnt.cnt_all > 10000)
			current_igi += 2;
		else if (rtlpriv->falsealm_cnt.cnt_all > 8000)
			current_igi++;
		else if (rtlpriv->falsealm_cnt.cnt_all < 500)
			current_igi--;
	}

	if (current_igi > DM_DIG_FA_UPPER)
		current_igi = DM_DIG_FA_UPPER;
	else if (current_igi < DM_DIG_FA_LOWER)
		current_igi = DM_DIG_FA_LOWER;

	if (rtlpriv->falsealm_cnt.cnt_all > 10000)
		current_igi = DM_DIG_FA_UPPER;

	dm_dig->cur_igvalue = current_igi;
	rtl88e_dm_write_dig(hw);
	dm_dig->media_connect_0 = ((mac->link_state >= MAC80211_LINKED) ?
				   true : false);
	dm_dig->dig_min_0 = dig_min;

	rtl88e_dm_cck_packet_detection_thresh(hw);
}

/* Reset dynamic-TX-power state; the feature starts disabled. */
static void rtl88e_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dynamic_txpower_enable = false;
	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}

/*
 * Choose the dynamic TX high-power level from the undecorated smoothed
 * PWDB and re-program the TX power level whenever the level changes.
 */
static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undec_sm_pwdb;

	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "Not connected\n");
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n", undec_sm_pwdb);
		} else {
			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		}
	} else {
		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n", undec_sm_pwdb);
	}

	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n"); } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n"); } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_NORMAL\n"); } if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) { RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "PHY_SetTxPowerLevel8192S() Channel = %d\n", rtlphy->current_channel); rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel); } rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; } void rtl88e_dm_write_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cur_igvalue = 0x%x, " "pre_igvalue = 0x%x, back_val = %d\n", dm_dig->cur_igvalue, dm_dig->pre_igvalue, dm_dig->back_val); if (dm_dig->cur_igvalue > 0x3f) dm_dig->cur_igvalue = 0x3f; if (dm_dig->pre_igvalue != dm_dig->cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, dm_dig->cur_igvalue); dm_dig->pre_igvalue = dm_dig->cur_igvalue; } } static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_sta_info *drv_priv; static u64 last_txok; static u64 last_rx; long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; if (rtlhal->oem_id == RT_CID_819x_HP) { u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rx; last_txok = cur_txok_cnt; last_rx = cur_rxok_cnt; if (cur_rxok_cnt > 
(cur_txok_cnt * 6)) rtl_write_dword(rtlpriv, REG_ARFR0, 0x8f015); else rtl_write_dword(rtlpriv, REG_ARFR0, 0xff015); } /* AP & ADHOC & MESH */ spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { if (drv_priv->rssi_stat.undec_sm_pwdb < tmp_entry_min_pwdb) tmp_entry_min_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; if (drv_priv->rssi_stat.undec_sm_pwdb > tmp_entry_max_pwdb) tmp_entry_max_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; } spin_unlock_bh(&rtlpriv->locks.entry_list_lock); /* If associated entry is found */ if (tmp_entry_max_pwdb != 0) { rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb; RTPRINT(rtlpriv, FDM, DM_PWDB, "EntryMaxPWDB = 0x%lx(%ld)\n", tmp_entry_max_pwdb, tmp_entry_max_pwdb); } else { rtlpriv->dm.entry_max_undec_sm_pwdb = 0; } /* If associated entry is found */ if (tmp_entry_min_pwdb != 0xff) { rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb; RTPRINT(rtlpriv, FDM, DM_PWDB, "EntryMinPWDB = 0x%lx(%ld)\n", tmp_entry_min_pwdb, tmp_entry_min_pwdb); } else { rtlpriv->dm.entry_min_undec_sm_pwdb = 0; } /* Indicate Rx signal strength to FW. 
*/ if (!rtlpriv->dm.useramask) rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb); } void rtl88e_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; } static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); static u64 last_txok_cnt; static u64 last_rxok_cnt; static u32 last_bt_edca_ul; static u32 last_bt_edca_dl; u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; u32 edca_be_ul = 0x5ea42b; u32 edca_be_dl = 0x5ea42b; bool change_edca = false; if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) || (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) { rtlpriv->dm.current_turbo_edca = false; last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul; last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl; } if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) { edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul; change_edca = true; } if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) { edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl; change_edca = true; } if (mac->link_state != MAC80211_LINKED) { rtlpriv->dm.current_turbo_edca = false; return; } if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) { if (!(edca_be_ul & 0xffff0000)) edca_be_ul |= 0x005e0000; if (!(edca_be_dl & 0xffff0000)) edca_be_dl |= 0x005e0000; } if ((change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting))) { cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; if (cur_rxok_cnt > 4 * cur_txok_cnt) { if (!rtlpriv->dm.is_cur_rdlstate || !rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_dl); rtlpriv->dm.is_cur_rdlstate = true; } } else { if (rtlpriv->dm.is_cur_rdlstate || 
!rtlpriv->dm.current_turbo_edca) { rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be_ul); rtlpriv->dm.is_cur_rdlstate = false; } } rtlpriv->dm.current_turbo_edca = true; } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)(&tmp)); rtlpriv->dm.current_turbo_edca = false; } } rtlpriv->dm.is_any_nonbepkts = false; last_txok_cnt = rtlpriv->stats.txbytesunicast; last_rxok_cnt = rtlpriv->stats.rxbytesunicast; } static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 thermalvalue = 0, delta, delta_lck, delta_iqk, off; u8 th_avg_cnt = 0; u32 thermalvalue_avg = 0; long ele_d, temp_cck; char ofdm_index[2], cck_index = 0, ofdm_old[2] = {0, 0}, cck_old = 0; int i = 0; bool is2t = false; u8 ofdm_min_index = 6, rf = (is2t) ? 2 : 1; u8 index_for_channel; enum _dec_inc {dec, power_inc}; /* 0.1 the following TWO tables decide the final index of * OFDM/CCK swing table */ char del_tbl_idx[2][15] = { {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}, {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10} }; u8 thermal_threshold[2][15] = { {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 27}, {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 25, 25, 25} }; /*Initilization (7 steps in total) */ rtlpriv->dm.txpower_trackinginit = true; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "rtl88e_dm_txpower_tracking_callback_thermalmeter\n"); thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00); if (!thermalvalue) return; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n", thermalvalue, rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter); /*1. 
	   Query OFDM Default Setting: Path A*/
	ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBAL, MASKDWORD) & MASKOFDM_D;
	for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
		if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
			ofdm_old[0] = (u8) i;
			rtldm->swing_idx_ofdm_base = (u8)i;
			RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
				 "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
				 ROFDM0_XATXIQIMBAL, ele_d, ofdm_old[0]);
			break;
		}
	}

	if (is2t) {
		ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBAL,
				      MASKDWORD) & MASKOFDM_D;
		for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
			if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
				ofdm_old[1] = (u8)i;
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
					 ROFDM0_XBTXIQIMBAL, ele_d,
					 ofdm_old[1]);
				break;
			}
		}
	}

	/*2.Query CCK default setting From 0xa24*/
	temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
	for (i = 0; i < CCK_TABLE_LENGTH; i++) {
		if (rtlpriv->dm.cck_inch14) {
			if (memcmp(&temp_cck, &cck_tbl_ch14[i][2], 4) == 0) {
				cck_old = (u8)i;
				rtldm->swing_idx_cck_base = (u8)i;
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n",
					 RCCK0_TXFILTER2, temp_cck, cck_old,
					 rtlpriv->dm.cck_inch14);
				break;
			}
		} else {
			if (memcmp(&temp_cck, &cck_tbl_ch1_13[i][2], 4) == 0) {
				cck_old = (u8)i;
				rtldm->swing_idx_cck_base = (u8)i;
				RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
					 DBG_LOUD,
					 "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
					 RCCK0_TXFILTER2, temp_cck, cck_old,
					 rtlpriv->dm.cck_inch14);
				break;
			}
		}
	}

	/*3 Initialize ThermalValues of RFCalibrateInfo*/
	if (!rtldm->thermalvalue) {
		rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter;
		rtlpriv->dm.thermalvalue_lck = thermalvalue;
		rtlpriv->dm.thermalvalue_iqk = thermalvalue;
		for (i = 0; i < rf; i++)
			rtlpriv->dm.ofdm_index[i] = ofdm_old[i];
		rtlpriv->dm.cck_index = cck_old;
	}

	/*4 Calculate average thermal meter*/
	rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue;
	rtldm->thermalvalue_avg_index++;
	if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_88E)
		rtldm->thermalvalue_avg_index = 0;

	/* average over the non-zero ring-buffer entries */
	for (i = 0; i < AVG_THERMAL_NUM_88E; i++) {
		if (rtldm->thermalvalue_avg[i]) {
			thermalvalue_avg += rtldm->thermalvalue_avg[i];
			th_avg_cnt++;
		}
	}

	if (th_avg_cnt)
		thermalvalue = (u8)(thermalvalue_avg / th_avg_cnt);

	/* 5 Calculate delta, delta_LCK, delta_IQK.*/
	if (rtlhal->reloadtxpowerindex) {
		delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
			(thermalvalue - rtlefuse->eeprom_thermalmeter) :
			(rtlefuse->eeprom_thermalmeter - thermalvalue);
		rtlhal->reloadtxpowerindex = false;
		rtlpriv->dm.done_txpower = false;
	} else if (rtlpriv->dm.done_txpower) {
		delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
			(thermalvalue - rtlpriv->dm.thermalvalue) :
			(rtlpriv->dm.thermalvalue - thermalvalue);
	} else {
		delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
			(thermalvalue - rtlefuse->eeprom_thermalmeter) :
			(rtlefuse->eeprom_thermalmeter - thermalvalue);
	}
	delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
		    (rtlpriv->dm.thermalvalue_lck - thermalvalue);
	delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
		    (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
		    (rtlpriv->dm.thermalvalue_iqk - thermalvalue);

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
		 "eeprom_thermalmeter 0x%x delta 0x%x "
		 "delta_lck 0x%x delta_iqk 0x%x\n",
		 thermalvalue, rtlpriv->dm.thermalvalue,
		 rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);

	/* 6 If necessary, do LCK.*/
	if (delta_lck >= 8) {
		rtlpriv->dm.thermalvalue_lck = thermalvalue;
		rtl88e_phy_lc_calibrate(hw);
	}

	/* 7 If necessary, move the index of swing table to adjust Tx power. */
	if (delta > 0 && rtlpriv->dm.txpower_track_control) {
		delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
			(thermalvalue - rtlefuse->eeprom_thermalmeter) :
			(rtlefuse->eeprom_thermalmeter - thermalvalue);

		/* 7.1 Get the final CCK_index and OFDM_index for each
		 * swing table. */
		if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
			CAL_SWING_OFF(off, power_inc, IDX_MAP, delta);
			for (i = 0; i < rf; i++)
				ofdm_index[i] = rtldm->ofdm_index[i] +
						del_tbl_idx[power_inc][off];
			cck_index = rtldm->cck_index +
				    del_tbl_idx[power_inc][off];
		} else {
			CAL_SWING_OFF(off, dec, IDX_MAP, delta);
			for (i = 0; i < rf; i++)
				ofdm_index[i] = rtldm->ofdm_index[i] +
						del_tbl_idx[dec][off];
			cck_index = rtldm->cck_index + del_tbl_idx[dec][off];
		}

		/* 7.2 Handle boundary conditions of index.*/
		for (i = 0; i < rf; i++) {
			if (ofdm_index[i] > OFDM_TABLE_SIZE-1)
				ofdm_index[i] = OFDM_TABLE_SIZE-1;
			else if (rtldm->ofdm_index[i] < ofdm_min_index)
				ofdm_index[i] = ofdm_min_index;
		}
		if (cck_index > CCK_TABLE_SIZE - 1)
			cck_index = CCK_TABLE_SIZE - 1;
		else if (cck_index < 0)
			cck_index = 0;

		/*7.3Configure the Swing Table to adjust Tx Power.*/
		if (rtlpriv->dm.txpower_track_control) {
			rtldm->done_txpower = true;
			rtldm->swing_idx_ofdm[RF90_PATH_A] =
				(u8)ofdm_index[RF90_PATH_A];
			if (is2t)
				rtldm->swing_idx_ofdm[RF90_PATH_B] =
					(u8)ofdm_index[RF90_PATH_B];
			rtldm->swing_idx_cck = cck_index;
			if (rtldm->swing_idx_ofdm_cur !=
			    rtldm->swing_idx_ofdm[0]) {
				rtldm->swing_idx_ofdm_cur =
					rtldm->swing_idx_ofdm[0];
				rtldm->swing_flag_ofdm = true;
			}
			if (rtldm->swing_idx_cck_cur !=
			    rtldm->swing_idx_cck) {
				rtldm->swing_idx_cck_cur =
					rtldm->swing_idx_cck;
				rtldm->swing_flag_cck = true;
			}
			rtl88e_chk_tx_track(hw, TXAGC, 0, 0);
			/*
			 * NOTE(review): index_for_channel is never
			 * initialised; the call below is dead code while
			 * is2t is hard-wired to false - confirm before
			 * enabling 2T paths.
			 */
			if (is2t)
				rtl88e_chk_tx_track(hw, BBSWING,
						    RF90_PATH_B,
						    index_for_channel);
		}
	}

	if (delta_iqk >= 8) {
		rtlpriv->dm.thermalvalue_iqk = thermalvalue;
		rtl88e_phy_iq_calibrate(hw, false);
	}

	if (rtldm->txpower_track_control)
		rtldm->thermalvalue = thermalvalue;
	rtldm->txpowercount = 0;
	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
}

/* Enable TX-power tracking and reset its state/default swing indices. */
static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw)
{
	struct rtl_priv
*rtlpriv = rtl_priv(hw); rtlpriv->dm.txpower_tracking = true; rtlpriv->dm.txpower_trackinginit = false; rtlpriv->dm.txpowercount = 0; rtlpriv->dm.txpower_track_control = true; rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A] = 12; rtlpriv->dm.swing_idx_ofdm_cur = 12; rtlpriv->dm.swing_flag_ofdm = false; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, " rtlpriv->dm.txpower_tracking = %d\n", rtlpriv->dm.txpower_tracking); } void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); static u8 tm_trigger; if (!rtlpriv->dm.txpower_tracking) return; if (!tm_trigger) { rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17)|BIT(16), 0x03); RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Trigger 88E Thermal Meter!!\n"); tm_trigger = 1; return; } else { RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Schedule TxPowerTracking !!\n"); rtl88e_dm_txpower_tracking_callback_thermalmeter(hw); tm_trigger = 0; } } void rtl88e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rate_adaptive *p_ra = &(rtlpriv->ra); p_ra->ratr_state = DM_RATR_STA_INIT; p_ra->pre_ratr_state = DM_RATR_STA_INIT; if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) rtlpriv->dm.useramask = true; else rtlpriv->dm.useramask = false; } static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rate_adaptive *p_ra = &(rtlpriv->ra); struct ieee80211_sta *sta = NULL; u32 low_rssi, hi_rssi; if (is_hal_stop(rtlhal)) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "driver is going to unload\n"); return; } if (!rtlpriv->dm.useramask) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "driver does not control rate adaptive mask\n"); return; } if (mac->link_state == MAC80211_LINKED && mac->opmode == NL80211_IFTYPE_STATION) { switch (p_ra->pre_ratr_state) { case DM_RATR_STA_HIGH: hi_rssi = 50; low_rssi 
= 20; break; case DM_RATR_STA_MIDDLE: hi_rssi = 55; low_rssi = 20; break; case DM_RATR_STA_LOW: hi_rssi = 50; low_rssi = 25; break; default: hi_rssi = 50; low_rssi = 20; break; } if (rtlpriv->dm.undec_sm_pwdb > (long)hi_rssi) p_ra->ratr_state = DM_RATR_STA_HIGH; else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssi) p_ra->ratr_state = DM_RATR_STA_MIDDLE; else p_ra->ratr_state = DM_RATR_STA_LOW; if (p_ra->pre_ratr_state != p_ra->ratr_state) { RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n", rtlpriv->dm.undec_sm_pwdb); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI_LEVEL = %d\n", p_ra->ratr_state); RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "PreState = %d, CurState = %d\n", p_ra->pre_ratr_state, p_ra->ratr_state); rcu_read_lock(); sta = rtl_find_sta(hw, mac->bssid); if (sta) rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state); rcu_read_unlock(); p_ra->pre_ratr_state = p_ra->ratr_state; } } } static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct ps_t *dm_pstable = &rtlpriv->dm_pstable; dm_pstable->pre_ccastate = CCA_MAX; dm_pstable->cur_ccasate = CCA_MAX; dm_pstable->pre_rfstate = RF_MAX; dm_pstable->cur_rfstate = RF_MAX; dm_pstable->rssi_val_min = 0; } static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw, u8 ant) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *fat_tbl = &(rtldm->fat_table); u32 def_ant, opt_ant; if (fat_tbl->rx_idle_ant != ant) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "need to update rx idle ant\n"); if (ant == MAIN_ANT) { def_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; opt_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; } else { def_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? 
AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX; opt_ant = (fat_tbl->rx_idle_ant == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX; } if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) { rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), def_ant); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), opt_ant); rtl_set_bbreg(hw, DM_REG_ANTSEL_CTRL_11N, BIT(14) | BIT(13) | BIT(12), def_ant); rtl_set_bbreg(hw, DM_REG_RESP_TX_11N, BIT(6) | BIT(7), def_ant); } else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) { rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), def_ant); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), opt_ant); } } fat_tbl->rx_idle_ant = ant; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n", ((ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"))); } static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw, u8 ant, u32 mac_id) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *fat_tbl = &(rtldm->fat_table); u8 target_ant; if (ant == MAIN_ANT) target_ant = MAIN_ANT_CG_TRX; else target_ant = AUX_ANT_CG_TRX; fat_tbl->antsel_a[mac_id] = target_ant & BIT(0); fat_tbl->antsel_b[mac_id] = (target_ant & BIT(1)) >> 1; fat_tbl->antsel_c[mac_id] = (target_ant & BIT(2)) >> 2; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n", ((ant == MAIN_ANT) ? 
("MAIN_ANT") : ("AUX_ANT"))); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "antsel_tr_mux = 3'b%d%d%d\n", fat_tbl->antsel_c[mac_id], fat_tbl->antsel_b[mac_id], fat_tbl->antsel_a[mac_id]); } static void rtl88e_dm_rx_hw_antena_div_init(struct ieee80211_hw *hw) { u32 value32; /*MAC Setting*/ value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD); rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 | (BIT(23) | BIT(25))); /*Pin Setting*/ rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0); rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 1); rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1); /*OFDM Setting*/ rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0); /*CCK Setting*/ rtl_set_bbreg(hw, DM_REG_BB_PWR_SAV4_11N, BIT(7), 1); rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); rtl88e_dm_update_rx_idle_ant(hw, MAIN_ANT); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKLWORD, 0x0201); } static void rtl88e_dm_trx_hw_antenna_div_init(struct ieee80211_hw *hw) { u32 value32; /*MAC Setting*/ value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD); rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 | (BIT(23) | BIT(25))); /*Pin Setting*/ rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0); rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 0); rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1); /*OFDM Setting*/ rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0); /*CCK Setting*/ rtl_set_bbreg(hw, DM_REG_BB_PWR_SAV4_11N, BIT(7), 1); rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /*TX Setting*/ rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 0); rtl88e_dm_update_rx_idle_ant(hw, MAIN_ANT); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKLWORD, 0x0201); } static void rtl88e_dm_fast_training_init(struct ieee80211_hw *hw) { struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct 
fast_ant_training *fat_tbl = &(rtldm->fat_table); u32 ant_combo = 2; u32 value32, i; for (i = 0; i < 6; i++) { fat_tbl->bssid[i] = 0; fat_tbl->ant_sum[i] = 0; fat_tbl->ant_cnt[i] = 0; fat_tbl->ant_ave[i] = 0; } fat_tbl->train_idx = 0; fat_tbl->fat_state = FAT_NORMAL_STATE; /*MAC Setting*/ value32 = rtl_get_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD); rtl_set_bbreg(hw, DM_REG_ANTSEL_PIN_11N, MASKDWORD, value32 | (BIT(23) | BIT(25))); value32 = rtl_get_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKDWORD); rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKDWORD, value32 | (BIT(16) | BIT(17))); rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKLWORD, 0); rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1, MASKDWORD, 0); /*Pin Setting*/ rtl_set_bbreg(hw, DM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(10), 0); rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(22), 0); rtl_set_bbreg(hw, DM_REG_LNA_SWITCH_11N, BIT(31), 1); /*OFDM Setting*/ rtl_set_bbreg(hw, DM_REG_ANTDIV_PARA1_11N, MASKDWORD, 0x000000a0); /*antenna mapping table*/ if (ant_combo == 2) { rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2); } else if (ant_combo == 7) { rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE0, 1); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE1, 2); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE2, 2); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING1_11N, MASKBYTE3, 3); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE0, 4); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE1, 5); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE2, 6); rtl_set_bbreg(hw, DM_REG_ANT_MAPPING2_11N, MASKBYTE3, 7); } /*TX Setting*/ rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), 1); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(2) | BIT(1) | BIT(0), (ant_combo - 1)); rtl_set_bbreg(hw, 
DM_REG_IGI_A_11N, BIT(7), 1); } static void rtl88e_dm_antenna_div_init(struct ieee80211_hw *hw) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) rtl88e_dm_rx_hw_antena_div_init(hw); else if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) rtl88e_dm_trx_hw_antenna_div_init(hw); else if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) rtl88e_dm_fast_training_init(hw); } void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc, u32 mac_id) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *fat_tbl = &(rtldm->fat_table); if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) || (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)) { SET_TX_DESC_ANTSEL_A(pdesc, fat_tbl->antsel_a[mac_id]); SET_TX_DESC_ANTSEL_B(pdesc, fat_tbl->antsel_b[mac_id]); SET_TX_DESC_ANTSEL_C(pdesc, fat_tbl->antsel_c[mac_id]); } } void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux, u32 mac_id, u32 rx_pwdb_all) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *fat_tbl = &(rtldm->fat_table); if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) { if (antsel_tr_mux == MAIN_ANT_CG_TRX) { fat_tbl->main_ant_sum[mac_id] += rx_pwdb_all; fat_tbl->main_ant_cnt[mac_id]++; } else { fat_tbl->aux_ant_sum[mac_id] += rx_pwdb_all; fat_tbl->aux_ant_cnt[mac_id]++; } } else if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) { if (antsel_tr_mux == MAIN_ANT_CGCS_RX) { fat_tbl->main_ant_sum[mac_id] += rx_pwdb_all; fat_tbl->main_ant_cnt[mac_id]++; } else { fat_tbl->aux_ant_sum[mac_id] += rx_pwdb_all; fat_tbl->aux_ant_cnt[mac_id]++; } } } static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_dig = &rtlpriv->dm_digtable; struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = 
rtl_dm(rtl_priv(hw)); struct rtl_sta_info *drv_priv; struct fast_ant_training *fat_tbl = &(rtldm->fat_table); u32 i, min_rssi = 0xff, ant_div_max_rssi = 0, max_rssi = 0; u32 local_min_rssi, local_max_rssi; u32 main_rssi, aux_rssi; u8 rx_idle_ant = 0, target_ant = 7; i = 0; main_rssi = (fat_tbl->main_ant_cnt[i] != 0) ? (fat_tbl->main_ant_sum[i] / fat_tbl->main_ant_cnt[i]) : 0; aux_rssi = (fat_tbl->aux_ant_cnt[i] != 0) ? (fat_tbl->aux_ant_sum[i] / fat_tbl->aux_ant_cnt[i]) : 0; target_ant = (main_rssi == aux_rssi) ? fat_tbl->rx_idle_ant : ((main_rssi >= aux_rssi) ? MAIN_ANT : AUX_ANT); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "main_ant_sum %d main_ant_cnt %d\n", fat_tbl->main_ant_sum[i], fat_tbl->main_ant_cnt[i]); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "aux_ant_sum %d aux_ant_cnt %d\n", fat_tbl->aux_ant_sum[i], fat_tbl->aux_ant_cnt[i]); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "main_rssi %d aux_rssi%d\n", main_rssi, aux_rssi); local_max_rssi = (main_rssi > aux_rssi) ? main_rssi : aux_rssi; if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40)) ant_div_max_rssi = local_max_rssi; if (local_max_rssi > max_rssi) max_rssi = local_max_rssi; if ((fat_tbl->rx_idle_ant == MAIN_ANT) && (main_rssi == 0)) main_rssi = aux_rssi; else if ((fat_tbl->rx_idle_ant == AUX_ANT) && (aux_rssi == 0)) aux_rssi = main_rssi; local_min_rssi = (main_rssi > aux_rssi) ? aux_rssi : main_rssi; if (local_min_rssi < min_rssi) { min_rssi = local_min_rssi; rx_idle_ant = target_ant; } if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) rtl88e_dm_update_tx_ant(hw, target_ant, i); if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { i++; main_rssi = (fat_tbl->main_ant_cnt[i] != 0) ? (fat_tbl->main_ant_sum[i] / fat_tbl->main_ant_cnt[i]) : 0; aux_rssi = (fat_tbl->aux_ant_cnt[i] != 0) ? 
(fat_tbl->aux_ant_sum[i] / fat_tbl->aux_ant_cnt[i]) : 0; target_ant = (main_rssi == aux_rssi) ? fat_tbl->rx_idle_ant : ((main_rssi >= aux_rssi) ? MAIN_ANT : AUX_ANT); local_max_rssi = max_t(u32, main_rssi, aux_rssi); if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40)) ant_div_max_rssi = local_max_rssi; if (local_max_rssi > max_rssi) max_rssi = local_max_rssi; if ((fat_tbl->rx_idle_ant == MAIN_ANT) && !main_rssi) main_rssi = aux_rssi; else if ((fat_tbl->rx_idle_ant == AUX_ANT) && (aux_rssi == 0)) aux_rssi = main_rssi; local_min_rssi = (main_rssi > aux_rssi) ? aux_rssi : main_rssi; if (local_min_rssi < min_rssi) { min_rssi = local_min_rssi; rx_idle_ant = target_ant; } if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) rtl88e_dm_update_tx_ant(hw, target_ant, i); } spin_unlock_bh(&rtlpriv->locks.entry_list_lock); } for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) { fat_tbl->main_ant_sum[i] = 0; fat_tbl->aux_ant_sum[i] = 0; fat_tbl->main_ant_cnt[i] = 0; fat_tbl->aux_ant_cnt[i] = 0; } rtl88e_dm_update_rx_idle_ant(hw, rx_idle_ant); dm_dig->antdiv_rssi_max = ant_div_max_rssi; dm_dig->rssi_max = max_rssi; } static void rtl88e_set_next_mac_address_target(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_sta_info *drv_priv; struct fast_ant_training *fat_tbl = &(rtldm->fat_table); u32 value32, i, j = 0; if (mac->link_state >= MAC80211_LINKED) { for (i = 0; i < ASSOCIATE_ENTRY_NUM; i++) { if ((fat_tbl->train_idx + 1) == ASSOCIATE_ENTRY_NUM) fat_tbl->train_idx = 0; else fat_tbl->train_idx++; if (fat_tbl->train_idx == 0) { value32 = (mac->mac_addr[5] << 8) | mac->mac_addr[4]; rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKLWORD, value32); value32 = (mac->mac_addr[3] << 24) | (mac->mac_addr[2] << 16) | (mac->mac_addr[1] << 8) | mac->mac_addr[0]; rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1, MASKDWORD, value32); break; } if (rtlpriv->mac80211.opmode != 
NL80211_IFTYPE_STATION) { spin_lock_bh(&rtlpriv->locks.entry_list_lock); list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { j++; if (j != fat_tbl->train_idx) continue; value32 = (drv_priv->mac_addr[5] << 8) | drv_priv->mac_addr[4]; rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_2, MASKLWORD, value32); value32 = (drv_priv->mac_addr[3]<<24) | (drv_priv->mac_addr[2]<<16) | (drv_priv->mac_addr[1]<<8) | drv_priv->mac_addr[0]; rtl_set_bbreg(hw, DM_REG_ANT_TRAIN_1, MASKDWORD, value32); break; } spin_unlock_bh(&rtlpriv->locks.entry_list_lock); /*find entry, break*/ if (j == fat_tbl->train_idx) break; } } } } static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *fat_tbl = &(rtldm->fat_table); u32 i, max_rssi = 0; u8 target_ant = 2; bool bpkt_filter_match = false; if (fat_tbl->fat_state == FAT_TRAINING_STATE) { for (i = 0; i < 7; i++) { if (fat_tbl->ant_cnt[i] == 0) { fat_tbl->ant_ave[i] = 0; } else { fat_tbl->ant_ave[i] = fat_tbl->ant_sum[i] / fat_tbl->ant_cnt[i]; bpkt_filter_match = true; } if (fat_tbl->ant_ave[i] > max_rssi) { max_rssi = fat_tbl->ant_ave[i]; target_ant = (u8) i; } } if (bpkt_filter_match == false) { rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N, BIT(16), 0); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0); } else { rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N, BIT(16), 0); rtl_set_bbreg(hw, DM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), target_ant); rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1); fat_tbl->antsel_a[fat_tbl->train_idx] = target_ant & BIT(0); fat_tbl->antsel_b[fat_tbl->train_idx] = (target_ant & BIT(1)) >> 1; fat_tbl->antsel_c[fat_tbl->train_idx] = (target_ant & BIT(2)) >> 2; if (target_ant == 0) rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0); } for (i = 0; i < 7; i++) { fat_tbl->ant_sum[i] = 0; fat_tbl->ant_cnt[i] = 0; } fat_tbl->fat_state = FAT_NORMAL_STATE; return; } if (fat_tbl->fat_state == FAT_NORMAL_STATE) 
{ rtl88e_set_next_mac_address_target(hw); fat_tbl->fat_state = FAT_TRAINING_STATE; rtl_set_bbreg(hw, DM_REG_TXAGC_A_1_MCS32_11N, BIT(16), 1); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1); mod_timer(&rtlpriv->works.fast_antenna_training_timer, jiffies + MSECS(RTL_WATCH_DOG_TIME)); } } void rtl88e_dm_fast_antenna_training_callback(unsigned long data) { struct ieee80211_hw *hw = (struct ieee80211_hw *)data; rtl88e_dm_fast_ant_training(hw); } static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct fast_ant_training *fat_tbl = &(rtldm->fat_table); if (mac->link_state < MAC80211_LINKED) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n"); if (fat_tbl->becomelinked == true) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "need to turn off HW AntDiv\n"); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0); rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 0); if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 0); fat_tbl->becomelinked = (mac->link_state == MAC80211_LINKED) ? true : false; } return; } else { if (fat_tbl->becomelinked == false) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Need to turn on HW AntDiv\n"); rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1); rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 1); if (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) rtl_set_bbreg(hw, DM_REG_TX_ANT_CTRL_11N, BIT(21), 1); fat_tbl->becomelinked = (mac->link_state >= MAC80211_LINKED) ? 
true : false; } } if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) || (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) rtl88e_dm_hw_ant_div(hw); else if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) rtl88e_dm_fast_ant_training(hw); } void rtl88e_dm_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; rtl88e_dm_diginit(hw); rtl88e_dm_init_dynamic_txpower(hw); rtl88e_dm_init_edca_turbo(hw); rtl88e_dm_init_rate_adaptive_mask(hw); rtl88e_dm_init_txpower_tracking(hw); rtl92c_dm_init_dynamic_bb_powersaving(hw); rtl88e_dm_antenna_div_init(hw); } void rtl88e_dm_watchdog(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); bool fw_current_inpsmode = false; bool fw_ps_awake = true; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, (u8 *)(&fw_current_inpsmode)); rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, (u8 *)(&fw_ps_awake)); if (ppsc->p2p_ps_info.p2p_ps_mode) fw_ps_awake = false; if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { rtl88e_dm_pwdb_monitor(hw); rtl88e_dm_dig(hw); rtl88e_dm_false_alarm_counter_statistics(hw); rtl92c_dm_dynamic_txpower(hw); rtl88e_dm_check_txpower_tracking(hw); rtl88e_dm_refresh_rate_adaptive_mask(hw); rtl88e_dm_check_edca_turbo(hw); rtl88e_dm_antenna_diversity(hw); } }
gpl-2.0
munjeni/kernel_htc_golfu
kernel/params.c
2218
21787
/* Helpers for initial module or kernel cmdline parsing Copyright (C) 2001 Rusty Russell. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/ctype.h> #if 0 #define DEBUGP printk #else #define DEBUGP(fmt, a...) #endif /* Protects all parameters, and incidentally kmalloced_param list. */ static DEFINE_MUTEX(param_lock); /* This just allows us to keep track of which parameters are kmalloced. */ struct kmalloced_param { struct list_head list; char val[]; }; static LIST_HEAD(kmalloced_params); static void *kmalloc_parameter(unsigned int size) { struct kmalloced_param *p; p = kmalloc(sizeof(*p) + size, GFP_KERNEL); if (!p) return NULL; list_add(&p->list, &kmalloced_params); return p->val; } /* Does nothing if parameter wasn't kmalloced above. 
*/ static void maybe_kfree_parameter(void *param) { struct kmalloced_param *p; list_for_each_entry(p, &kmalloced_params, list) { if (p->val == param) { list_del(&p->list); kfree(p); break; } } } static inline char dash2underscore(char c) { if (c == '-') return '_'; return c; } static inline int parameq(const char *input, const char *paramname) { unsigned int i; for (i = 0; dash2underscore(input[i]) == paramname[i]; i++) if (input[i] == '\0') return 1; return 0; } static int parse_one(char *param, char *val, const struct kernel_param *params, unsigned num_params, int (*handle_unknown)(char *param, char *val)) { unsigned int i; int err; /* Find parameter */ for (i = 0; i < num_params; i++) { if (parameq(param, params[i].name)) { /* No one handled NULL, so do it here. */ if (!val && params[i].ops->set != param_set_bool) return -EINVAL; DEBUGP("They are equal! Calling %p\n", params[i].ops->set); mutex_lock(&param_lock); err = params[i].ops->set(val, &params[i]); mutex_unlock(&param_lock); return err; } } if (handle_unknown) { DEBUGP("Unknown argument: calling %p\n", handle_unknown); return handle_unknown(param, val); } DEBUGP("Unknown argument `%s'\n", param); return -ENOENT; } /* You can use " around spaces, but can't escape ". */ /* Hyphens and underscores equivalent in parameter names. */ static char *next_arg(char *args, char **param, char **val) { unsigned int i, equals = 0; int in_quote = 0, quoted = 0; char *next; if (*args == '"') { args++; in_quote = 1; quoted = 1; } for (i = 0; args[i]; i++) { if (isspace(args[i]) && !in_quote) break; if (equals == 0) { if (args[i] == '=') equals = i; } if (args[i] == '"') in_quote = !in_quote; } *param = args; if (!equals) *val = NULL; else { args[equals] = '\0'; *val = args + equals + 1; /* Don't include quotes in value. 
*/ if (**val == '"') { (*val)++; if (args[i-1] == '"') args[i-1] = '\0'; } if (quoted && args[i-1] == '"') args[i-1] = '\0'; } if (args[i]) { args[i] = '\0'; next = args + i + 1; } else next = args + i; /* Chew up trailing spaces. */ return skip_spaces(next); } /* Args looks like "foo=bar,bar2 baz=fuz wiz". */ int parse_args(const char *name, char *args, const struct kernel_param *params, unsigned num, int (*unknown)(char *param, char *val)) { char *param, *val; DEBUGP("Parsing ARGS: %s\n", args); /* Chew leading spaces */ args = skip_spaces(args); while (*args) { int ret; int irq_was_disabled; args = next_arg(args, &param, &val); irq_was_disabled = irqs_disabled(); ret = parse_one(param, val, params, num, unknown); if (irq_was_disabled && !irqs_disabled()) { printk(KERN_WARNING "parse_args(): option '%s' enabled " "irq's!\n", param); } switch (ret) { case -ENOENT: printk(KERN_ERR "%s: Unknown parameter `%s'\n", name, param); return ret; case -ENOSPC: printk(KERN_ERR "%s: `%s' too large for parameter `%s'\n", name, val ?: "", param); return ret; case 0: break; default: printk(KERN_ERR "%s: `%s' invalid for parameter `%s'\n", name, val ?: "", param); return ret; } } /* All parsed OK. */ return 0; } /* Lazy bastard, eh? 
*/ #define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \ int param_set_##name(const char *val, const struct kernel_param *kp) \ { \ tmptype l; \ int ret; \ \ ret = strtolfn(val, 0, &l); \ if (ret == -EINVAL || ((type)l != l)) \ return -EINVAL; \ *((type *)kp->arg) = l; \ return 0; \ } \ int param_get_##name(char *buffer, const struct kernel_param *kp) \ { \ return sprintf(buffer, format, *((type *)kp->arg)); \ } \ struct kernel_param_ops param_ops_##name = { \ .set = param_set_##name, \ .get = param_get_##name, \ }; \ EXPORT_SYMBOL(param_set_##name); \ EXPORT_SYMBOL(param_get_##name); \ EXPORT_SYMBOL(param_ops_##name) STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol); STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(int, int, "%i", long, strict_strtol); STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, strict_strtoul); STANDARD_PARAM_DEF(long, long, "%li", long, strict_strtol); STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul); int param_set_charp(const char *val, const struct kernel_param *kp) { if (strlen(val) > 1024) { printk(KERN_ERR "%s: string parameter too long\n", kp->name); return -ENOSPC; } maybe_kfree_parameter(*(char **)kp->arg); /* This is a hack. We can't kmalloc in early boot, and we * don't need to; this mangled commandline is preserved. 
*/ if (slab_is_available()) { *(char **)kp->arg = kmalloc_parameter(strlen(val)+1); if (!*(char **)kp->arg) return -ENOMEM; strcpy(*(char **)kp->arg, val); } else *(const char **)kp->arg = val; return 0; } EXPORT_SYMBOL(param_set_charp); int param_get_charp(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%s", *((char **)kp->arg)); } EXPORT_SYMBOL(param_get_charp); static void param_free_charp(void *arg) { maybe_kfree_parameter(*((char **)arg)); } struct kernel_param_ops param_ops_charp = { .set = param_set_charp, .get = param_get_charp, .free = param_free_charp, }; EXPORT_SYMBOL(param_ops_charp); /* Actually could be a bool or an int, for historical reasons. */ int param_set_bool(const char *val, const struct kernel_param *kp) { bool v; int ret; /* No equals means "set"... */ if (!val) val = "1"; /* One of =[yYnN01] */ ret = strtobool(val, &v); if (ret) return ret; if (kp->flags & KPARAM_ISBOOL) *(bool *)kp->arg = v; else *(int *)kp->arg = v; return 0; } EXPORT_SYMBOL(param_set_bool); int param_get_bool(char *buffer, const struct kernel_param *kp) { bool val; if (kp->flags & KPARAM_ISBOOL) val = *(bool *)kp->arg; else val = *(int *)kp->arg; /* Y and N chosen as being relatively non-coder friendly */ return sprintf(buffer, "%c", val ? 'Y' : 'N'); } EXPORT_SYMBOL(param_get_bool); struct kernel_param_ops param_ops_bool = { .set = param_set_bool, .get = param_get_bool, }; EXPORT_SYMBOL(param_ops_bool); /* This one must be bool. */ int param_set_invbool(const char *val, const struct kernel_param *kp) { int ret; bool boolval; struct kernel_param dummy; dummy.arg = &boolval; dummy.flags = KPARAM_ISBOOL; ret = param_set_bool(val, &dummy); if (ret == 0) *(bool *)kp->arg = !boolval; return ret; } EXPORT_SYMBOL(param_set_invbool); int param_get_invbool(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 
'N' : 'Y'); } EXPORT_SYMBOL(param_get_invbool); struct kernel_param_ops param_ops_invbool = { .set = param_set_invbool, .get = param_get_invbool, }; EXPORT_SYMBOL(param_ops_invbool); /* We break the rule and mangle the string. */ static int param_array(const char *name, const char *val, unsigned int min, unsigned int max, void *elem, int elemsize, int (*set)(const char *, const struct kernel_param *kp), u16 flags, unsigned int *num) { int ret; struct kernel_param kp; char save; /* Get the name right for errors. */ kp.name = name; kp.arg = elem; kp.flags = flags; *num = 0; /* We expect a comma-separated list of values. */ do { int len; if (*num == max) { printk(KERN_ERR "%s: can only take %i arguments\n", name, max); return -EINVAL; } len = strcspn(val, ","); /* nul-terminate and parse */ save = val[len]; ((char *)val)[len] = '\0'; BUG_ON(!mutex_is_locked(&param_lock)); ret = set(val, &kp); if (ret != 0) return ret; kp.arg += elemsize; val += len+1; (*num)++; } while (save == ','); if (*num < min) { printk(KERN_ERR "%s: needs at least %i arguments\n", name, min); return -EINVAL; } return 0; } static int param_array_set(const char *val, const struct kernel_param *kp) { const struct kparam_array *arr = kp->arr; unsigned int temp_num; return param_array(kp->name, val, 1, arr->max, arr->elem, arr->elemsize, arr->ops->set, kp->flags, arr->num ?: &temp_num); } static int param_array_get(char *buffer, const struct kernel_param *kp) { int i, off, ret; const struct kparam_array *arr = kp->arr; struct kernel_param p; p = *kp; for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { if (i) buffer[off++] = ','; p.arg = arr->elem + arr->elemsize * i; BUG_ON(!mutex_is_locked(&param_lock)); ret = arr->ops->get(buffer + off, &p); if (ret < 0) return ret; off += ret; } buffer[off] = '\0'; return off; } static void param_array_free(void *arg) { unsigned int i; const struct kparam_array *arr = arg; if (arr->ops->free) for (i = 0; i < (arr->num ? 
*arr->num : arr->max); i++) arr->ops->free(arr->elem + arr->elemsize * i); } struct kernel_param_ops param_array_ops = { .set = param_array_set, .get = param_array_get, .free = param_array_free, }; EXPORT_SYMBOL(param_array_ops); int param_set_copystring(const char *val, const struct kernel_param *kp) { const struct kparam_string *kps = kp->str; if (strlen(val)+1 > kps->maxlen) { printk(KERN_ERR "%s: string doesn't fit in %u chars.\n", kp->name, kps->maxlen-1); return -ENOSPC; } strcpy(kps->string, val); return 0; } EXPORT_SYMBOL(param_set_copystring); int param_get_string(char *buffer, const struct kernel_param *kp) { const struct kparam_string *kps = kp->str; return strlcpy(buffer, kps->string, kps->maxlen); } EXPORT_SYMBOL(param_get_string); struct kernel_param_ops param_ops_string = { .set = param_set_copystring, .get = param_get_string, }; EXPORT_SYMBOL(param_ops_string); /* sysfs output in /sys/modules/XYZ/parameters/ */ #define to_module_attr(n) container_of(n, struct module_attribute, attr) #define to_module_kobject(n) container_of(n, struct module_kobject, kobj) extern struct kernel_param __start___param[], __stop___param[]; struct param_attribute { struct module_attribute mattr; const struct kernel_param *param; }; struct module_param_attrs { unsigned int num; struct attribute_group grp; struct param_attribute attrs[0]; }; #ifdef CONFIG_SYSFS #define to_param_attr(n) container_of(n, struct param_attribute, mattr) static ssize_t param_attr_show(struct module_attribute *mattr, struct module *mod, char *buf) { int count; struct param_attribute *attribute = to_param_attr(mattr); if (!attribute->param->ops->get) return -EPERM; mutex_lock(&param_lock); count = attribute->param->ops->get(buf, attribute->param); mutex_unlock(&param_lock); if (count > 0) { strcat(buf, "\n"); ++count; } return count; } /* sysfs always hands a nul-terminated string in buf. We rely on that. 
*/ static ssize_t param_attr_store(struct module_attribute *mattr, struct module *owner, const char *buf, size_t len) { int err; struct param_attribute *attribute = to_param_attr(mattr); if (!attribute->param->ops->set) return -EPERM; mutex_lock(&param_lock); err = attribute->param->ops->set(buf, attribute->param); mutex_unlock(&param_lock); if (!err) return len; return err; } #endif #ifdef CONFIG_MODULES #define __modinit #else #define __modinit __init #endif #ifdef CONFIG_SYSFS void __kernel_param_lock(void) { mutex_lock(&param_lock); } EXPORT_SYMBOL(__kernel_param_lock); void __kernel_param_unlock(void) { mutex_unlock(&param_lock); } EXPORT_SYMBOL(__kernel_param_unlock); /* * add_sysfs_param - add a parameter to sysfs * @mk: struct module_kobject * @kparam: the actual parameter definition to add to sysfs * @name: name of parameter * * Create a kobject if for a (per-module) parameter if mp NULL, and * create file in sysfs. Returns an error on out of memory. Always cleans up * if there's an error. */ static __modinit int add_sysfs_param(struct module_kobject *mk, const struct kernel_param *kp, const char *name) { struct module_param_attrs *new; struct attribute **attrs; int err, num; /* We don't bother calling this with invisible parameters. */ BUG_ON(!kp->perm); if (!mk->mp) { num = 0; attrs = NULL; } else { num = mk->mp->num; attrs = mk->mp->grp.attrs; } /* Enlarge. */ new = krealloc(mk->mp, sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1), GFP_KERNEL); if (!new) { kfree(mk->mp); err = -ENOMEM; goto fail; } attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL); if (!attrs) { err = -ENOMEM; goto fail_free_new; } /* Sysfs wants everything zeroed. */ memset(new, 0, sizeof(*new)); memset(&new->attrs[num], 0, sizeof(new->attrs[num])); memset(&attrs[num], 0, sizeof(attrs[num])); new->grp.name = "parameters"; new->grp.attrs = attrs; /* Tack new one on the end. 
*/ sysfs_attr_init(&new->attrs[num].mattr.attr); new->attrs[num].param = kp; new->attrs[num].mattr.show = param_attr_show; new->attrs[num].mattr.store = param_attr_store; new->attrs[num].mattr.attr.name = (char *)name; new->attrs[num].mattr.attr.mode = kp->perm; new->num = num+1; /* Fix up all the pointers, since krealloc can move us */ for (num = 0; num < new->num; num++) new->grp.attrs[num] = &new->attrs[num].mattr.attr; new->grp.attrs[num] = NULL; mk->mp = new; return 0; fail_free_new: kfree(new); fail: mk->mp = NULL; return err; } #ifdef CONFIG_MODULES static void free_module_param_attrs(struct module_kobject *mk) { kfree(mk->mp->grp.attrs); kfree(mk->mp); mk->mp = NULL; } /* * module_param_sysfs_setup - setup sysfs support for one module * @mod: module * @kparam: module parameters (array) * @num_params: number of module parameters * * Adds sysfs entries for module parameters under * /sys/module/[mod->name]/parameters/ */ int module_param_sysfs_setup(struct module *mod, const struct kernel_param *kparam, unsigned int num_params) { int i, err; bool params = false; for (i = 0; i < num_params; i++) { if (kparam[i].perm == 0) continue; err = add_sysfs_param(&mod->mkobj, &kparam[i], kparam[i].name); if (err) return err; params = true; } if (!params) return 0; /* Create the param group. */ err = sysfs_create_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp); if (err) free_module_param_attrs(&mod->mkobj); return err; } /* * module_param_sysfs_remove - remove sysfs support for one module * @mod: module * * Remove sysfs entries for module parameters and the corresponding * kobject. */ void module_param_sysfs_remove(struct module *mod) { if (mod->mkobj.mp) { sysfs_remove_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp); /* We are positive that no one is using any param * attrs at this point. Deallocate immediately. 
*/ free_module_param_attrs(&mod->mkobj); } } #endif void destroy_params(const struct kernel_param *params, unsigned num) { unsigned int i; for (i = 0; i < num; i++) if (params[i].ops->free) params[i].ops->free(params[i].arg); } static struct module_kobject * __init locate_module_kobject(const char *name) { struct module_kobject *mk; struct kobject *kobj; int err; kobj = kset_find_obj(module_kset, name); if (kobj) { mk = to_module_kobject(kobj); } else { mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); BUG_ON(!mk); mk->mod = THIS_MODULE; mk->kobj.kset = module_kset; err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name); if (err) { kobject_put(&mk->kobj); printk(KERN_ERR "Module '%s' failed add to sysfs, error number %d\n", name, err); printk(KERN_ERR "The system will be unstable now.\n"); return NULL; } /* So that we hold reference in both cases. */ kobject_get(&mk->kobj); } return mk; } static void __init kernel_add_sysfs_param(const char *name, struct kernel_param *kparam, unsigned int name_skip) { struct module_kobject *mk; int err; mk = locate_module_kobject(name); if (!mk) return; /* We need to remove old parameters before adding more. */ if (mk->mp) sysfs_remove_group(&mk->kobj, &mk->mp->grp); /* These should not fail at boot. */ err = add_sysfs_param(mk, kparam, kparam->name + name_skip); BUG_ON(err); err = sysfs_create_group(&mk->kobj, &mk->mp->grp); BUG_ON(err); kobject_uevent(&mk->kobj, KOBJ_ADD); kobject_put(&mk->kobj); } /* * param_sysfs_builtin - add contents in /sys/parameters for built-in modules * * Add module_parameters to sysfs for "modules" built into the kernel. * * The "module" name (KBUILD_MODNAME) is stored before a dot, the * "parameter" name is stored behind a dot in kernel_param->name. So, * extract the "module" name for all built-in kernel_param-eters, * and for all who have the same, call kernel_add_sysfs_param. 
*/ static void __init param_sysfs_builtin(void) { struct kernel_param *kp; unsigned int name_len; char modname[MODULE_NAME_LEN]; for (kp = __start___param; kp < __stop___param; kp++) { char *dot; if (kp->perm == 0) continue; dot = strchr(kp->name, '.'); if (!dot) { /* This happens for core_param() */ strcpy(modname, "kernel"); name_len = 0; } else { name_len = dot - kp->name + 1; strlcpy(modname, kp->name, name_len); } kernel_add_sysfs_param(modname, kp, name_len); } } ssize_t __modver_version_show(struct module_attribute *mattr, struct module *mod, char *buf) { struct module_version_attribute *vattr = container_of(mattr, struct module_version_attribute, mattr); return sprintf(buf, "%s\n", vattr->version); } extern const struct module_version_attribute *__start___modver[]; extern const struct module_version_attribute *__stop___modver[]; static void __init version_sysfs_builtin(void) { const struct module_version_attribute **p; struct module_kobject *mk; int err; for (p = __start___modver; p < __stop___modver; p++) { const struct module_version_attribute *vattr = *p; mk = locate_module_kobject(vattr->module_name); if (mk) { err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); kobject_uevent(&mk->kobj, KOBJ_ADD); kobject_put(&mk->kobj); } } } /* module-related sysfs stuff */ static ssize_t module_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct module_attribute *attribute; struct module_kobject *mk; int ret; attribute = to_module_attr(attr); mk = to_module_kobject(kobj); if (!attribute->show) return -EIO; ret = attribute->show(attribute, mk->mod, buf); return ret; } static ssize_t module_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct module_attribute *attribute; struct module_kobject *mk; int ret; attribute = to_module_attr(attr); mk = to_module_kobject(kobj); if (!attribute->store) return -EIO; ret = attribute->store(attribute, mk->mod, buf, len); return ret; } static const struct 
sysfs_ops module_sysfs_ops = { .show = module_attr_show, .store = module_attr_store, }; static int uevent_filter(struct kset *kset, struct kobject *kobj) { struct kobj_type *ktype = get_ktype(kobj); if (ktype == &module_ktype) return 1; return 0; } static const struct kset_uevent_ops module_uevent_ops = { .filter = uevent_filter, }; struct kset *module_kset; int module_sysfs_initialized; struct kobj_type module_ktype = { .sysfs_ops = &module_sysfs_ops, }; /* * param_sysfs_init - wrapper for built-in params support */ static int __init param_sysfs_init(void) { module_kset = kset_create_and_add("module", &module_uevent_ops, NULL); if (!module_kset) { printk(KERN_WARNING "%s (%d): error creating kset\n", __FILE__, __LINE__); return -ENOMEM; } module_sysfs_initialized = 1; version_sysfs_builtin(); param_sysfs_builtin(); return 0; } subsys_initcall(param_sysfs_init); #endif /* CONFIG_SYSFS */
gpl-2.0
DirtyUnicorns/android_kernel_asus_Z00A
arch/blackfin/mach-bf561/boards/acvilon.c
2730
12830
/* * File: arch/blackfin/mach-bf561/acvilon.c * Based on: arch/blackfin/mach-bf561/ezkit.c * Author: * * Created: * Description: * * Modified: * Copyright 2004-2006 Analog Devices Inc. * Copyright 2009 CJSC "NII STT" * * Bugs: * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * * For more information about Acvilon BF561 SoM please * go to http://www.niistt.ru/ * */ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/nand.h> #include <linux/mtd/plat-ram.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/i2c-pca-platform.h> #include <linux/delay.h> #include <linux/io.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <asm/cacheflush.h> #include <linux/i2c.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "Acvilon board"; #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) #include <linux/usb/isp1760.h> static struct resource bfin_isp1760_resources[] = { [0] = { .start = 0x20000000, .end = 0x20000000 + 0x000fffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PF15, 
.end = IRQ_PF15, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; static struct isp1760_platform_data isp1760_priv = { .is_isp1761 = 0, .port1_disable = 0, .bus_width_16 = 1, .port1_otg = 0, .analog_oc = 0, .dack_polarity_high = 0, .dreq_polarity_high = 0, }; static struct platform_device bfin_isp1760_device = { .name = "isp1760-hcd", .id = 0, .dev = { .platform_data = &isp1760_priv, }, .num_resources = ARRAY_SIZE(bfin_isp1760_resources), .resource = bfin_isp1760_resources, }; #endif static struct resource bfin_i2c_pca_resources[] = { { .name = "pca9564-regs", .start = 0x2C000000, .end = 0x2C000000 + 16, .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, }, { .start = IRQ_PF8, .end = IRQ_PF8, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; struct i2c_pca9564_pf_platform_data pca9564_platform_data = { .gpio = -1, .i2c_clock_speed = 330000, .timeout = HZ, }; /* PCA9564 I2C Bus driver */ static struct platform_device bfin_i2c_pca_device = { .name = "i2c-pca-platform", .id = 0, .num_resources = ARRAY_SIZE(bfin_i2c_pca_resources), .resource = bfin_i2c_pca_resources, .dev = { .platform_data = &pca9564_platform_data, } }; /* I2C devices fitted. 
*/ static struct i2c_board_info acvilon_i2c_devs[] __initdata = { { I2C_BOARD_INFO("ds1339", 0x68), }, { I2C_BOARD_INFO("tcn75", 0x49), }, }; #if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE) static struct platdata_mtd_ram mtd_ram_data = { .mapname = "rootfs(RAM)", .bankwidth = 4, }; static struct resource mtd_ram_resource = { .start = 0x4000000, .end = 0x5ffffff, .flags = IORESOURCE_MEM, }; static struct platform_device mtd_ram_device = { .name = "mtd-ram", .id = 0, .dev = { .platform_data = &mtd_ram_data, }, .num_resources = 1, .resource = &mtd_ram_resource, }; #endif #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include <linux/smsc911x.h> static struct resource smsc911x_resources[] = { { .name = "smsc911x-memory", .start = 0x28000000, .end = 0x28000000 + 0xFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; static struct smsc911x_platform_config smsc911x_config = { .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = 0, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = BFIN_UART_THR, .end = BFIN_UART_GCTL + 2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART_TX, .end = IRQ_UART_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART_RX, .end = IRQ_UART_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART_ERROR, .end = IRQ_UART_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART_TX, .end = CH_UART_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART_RX, .end = CH_UART_RX, .flags = IORESOURCE_DMA, }, 
}; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { /* Passed to driver */ .platform_data = &bfin_uart0_peripherals, }, }; #endif #endif #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) static struct mtd_partition bfin_plat_nand_partitions[] = { { .name = "params(nand)", .size = 32 * 1024 * 1024, .offset = 0, }, { .name = "userfs(nand)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, }, }; #define BFIN_NAND_PLAT_CLE 2 #define BFIN_NAND_PLAT_ALE 3 static void bfin_plat_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *this = mtd->priv; if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_CLE)); else writeb(cmd, this->IO_ADDR_W + (1 << BFIN_NAND_PLAT_ALE)); } #define BFIN_NAND_PLAT_READY GPIO_PF10 static int bfin_plat_nand_dev_ready(struct mtd_info *mtd) { return gpio_get_value(BFIN_NAND_PLAT_READY); } static struct platform_nand_data bfin_plat_nand_data = { .chip = { .nr_chips = 1, .chip_delay = 30, .partitions = bfin_plat_nand_partitions, .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions), }, .ctrl = { .cmd_ctrl = bfin_plat_nand_cmd_ctrl, .dev_ready = bfin_plat_nand_dev_ready, }, }; #define MAX(x, y) (x > y ? 
x : y) static struct resource bfin_plat_nand_resources = { .start = 0x24000000, .end = 0x24000000 + (1 << MAX(BFIN_NAND_PLAT_CLE, BFIN_NAND_PLAT_ALE)), .flags = IORESOURCE_MEM, }; static struct platform_device bfin_async_nand_device = { .name = "gen_nand", .id = -1, .num_resources = 1, .resource = &bfin_plat_nand_resources, .dev = { .platform_data = &bfin_plat_nand_data, }, }; static void bfin_plat_nand_init(void) { gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat"); } #else static void bfin_plat_nand_init(void) { } #endif #if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE) static struct mtd_partition bfin_spi_dataflash_partitions[] = { { .name = "bootloader", .size = 0x4200, .offset = 0, .mask_flags = MTD_CAP_ROM}, { .name = "u-boot", .size = 0x42000, .offset = MTDPART_OFS_APPEND, }, { .name = "u-boot(params)", .size = 0x4200, .offset = MTDPART_OFS_APPEND, }, { .name = "kernel", .size = 0x294000, .offset = MTDPART_OFS_APPEND, }, { .name = "params", .size = 0x42000, .offset = MTDPART_OFS_APPEND, }, { .name = "rootfs", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_dataflash_data = { .name = "SPI Dataflash", .parts = bfin_spi_dataflash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions), }; /* DataFlash chip */ static struct bfin5xx_spi_chip data_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip */ }; #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, 
P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 3, }, #endif #if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE) { /* DataFlash chip */ .modalias = "mtd_dataflash", .max_speed_hz = 33250000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 2, /* Framework chip select */ .platform_data = &bfin_spi_dataflash_data, .controller_data = &data_flash_chip_info, .mode = SPI_MODE_3, }, #endif }; static struct resource bfin_gpios_resources = { .start = 31, /* .end = MAX_BLACKFIN_GPIOS - 1, */ .end = 32, .flags = IORESOURCE_IRQ, }; static struct platform_device bfin_gpios_device = { .name = "simple-gpio", .id = -1, .num_resources = 1, .resource = &bfin_gpios_resources, }; static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_085, 250000000), VRPAIR(VLEV_090, 300000000), VRPAIR(VLEV_095, 313000000), VRPAIR(VLEV_100, 350000000), VRPAIR(VLEV_105, 400000000), VRPAIR(VLEV_110, 444000000), VRPAIR(VLEV_115, 450000000), VRPAIR(VLEV_120, 475000000), VRPAIR(VLEV_125, 500000000), VRPAIR(VLEV_130, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */ , }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *acvilon_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_SPI_BFIN5XX) || 
defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #endif &bfin_gpios_device, #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) &smsc911x_device, #endif &bfin_i2c_pca_device, #if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) &bfin_async_nand_device, #endif #if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE) &mtd_ram_device, #endif }; static int __init acvilon_init(void) { int ret; printk(KERN_INFO "%s(): registering device resources\n", __func__); bfin_plat_nand_init(); ret = platform_add_devices(acvilon_devices, ARRAY_SIZE(acvilon_devices)); if (ret < 0) return ret; i2c_register_board_info(0, acvilon_i2c_devs, ARRAY_SIZE(acvilon_i2c_devs)); bfin_write_FIO0_FLAG_C(1 << 14); msleep(5); bfin_write_FIO0_FLAG_S(1 << 14); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(acvilon_init); static struct platform_device *acvilon_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(acvilon_early_devices, ARRAY_SIZE(acvilon_early_devices)); }
gpl-2.0
CyanogenMod/lge-kernel-iproj
arch/sh/kernel/cpu/irq/imask.c
2986
1962
/* * arch/sh/kernel/cpu/irq/imask.c * * Copyright (C) 1999, 2000 Niibe Yutaka * * Simple interrupt handling using IMASK of SR register. * */ /* NOTE: Will not work on level 15 */ #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/cache.h> #include <linux/irq.h> #include <linux/bitmap.h> #include <asm/system.h> #include <asm/irq.h> /* Bitmap of IRQ masked */ #define IMASK_PRIORITY 15 static DECLARE_BITMAP(imask_mask, IMASK_PRIORITY); static int interrupt_priority; static inline void set_interrupt_registers(int ip) { unsigned long __dummy; asm volatile( #ifdef CONFIG_CPU_HAS_SR_RB "ldc %2, r6_bank\n\t" #endif "stc sr, %0\n\t" "and #0xf0, %0\n\t" "shlr2 %0\n\t" "cmp/eq #0x3c, %0\n\t" "bt/s 1f ! CLI-ed\n\t" " stc sr, %0\n\t" "and %1, %0\n\t" "or %2, %0\n\t" "ldc %0, sr\n" "1:" : "=&z" (__dummy) : "r" (~0xf0), "r" (ip << 4) : "t"); } static void mask_imask_irq(struct irq_data *data) { unsigned int irq = data->irq; clear_bit(irq, imask_mask); if (interrupt_priority < IMASK_PRIORITY - irq) interrupt_priority = IMASK_PRIORITY - irq; set_interrupt_registers(interrupt_priority); } static void unmask_imask_irq(struct irq_data *data) { unsigned int irq = data->irq; set_bit(irq, imask_mask); interrupt_priority = IMASK_PRIORITY - find_first_zero_bit(imask_mask, IMASK_PRIORITY); set_interrupt_registers(interrupt_priority); } static struct irq_chip imask_irq_chip = { .name = "SR.IMASK", .irq_mask = mask_imask_irq, .irq_unmask = unmask_imask_irq, .irq_mask_ack = mask_imask_irq, }; void make_imask_irq(unsigned int irq) { irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq, "level"); }
gpl-2.0
dreamcwli/android_kernel_moto_wingray
net/ipv4/netfilter/nf_nat_proto_sctp.c
3242
2617
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <net/sctp/checksum.h>
#include <net/netfilter/nf_nat_protocol.h>

/* Last SCTP port handed out; shared rover for NAT port allocation. */
static u_int16_t nf_sctp_port_rover;

/*
 * Pick a unique NAT tuple for an SCTP connection by delegating to the
 * generic port-allocation helper, seeded with the module-wide rover.
 */
static void
sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype,
		  const struct nf_conn *ct)
{
	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
				  &nf_sctp_port_rover);
}

/*
 * Rewrite the SCTP header of @skb according to @tuple: source port for
 * SNAT, destination port for DNAT.  The full CRC32c checksum is then
 * recomputed over the linear head plus every page fragment, since SCTP
 * (unlike TCP/UDP) cannot be incrementally updated for an IP change.
 *
 * Returns false if the header cannot be made writable.
 */
static bool
sctp_manip_pkt(struct sk_buff *skb,
	       unsigned int iphdroff,
	       const struct nf_conntrack_tuple *tuple,
	       enum nf_nat_manip_type maniptype)
{
	const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
	struct sk_buff *frag;
	sctp_sctphdr_t *hdr;
	unsigned int hdroff = iphdroff + iph->ihl*4;
	__be32 oldip, newip;
	__be32 crc32;

	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
		return false;

	/* Reload: skb_make_writable() may have moved skb->data. */
	iph = (struct iphdr *)(skb->data + iphdroff);
	hdr = (struct sctphdr *)(skb->data + hdroff);

	if (maniptype == IP_NAT_MANIP_SRC) {
		/* Get rid of src ip and src pt */
		oldip = iph->saddr;
		newip = tuple->src.u3.ip;
		hdr->source = tuple->src.u.sctp.port;
	} else {
		/* Get rid of dst ip and dst pt */
		oldip = iph->daddr;
		newip = tuple->dst.u3.ip;
		hdr->dest = tuple->dst.u.sctp.port;
	}

	/* Recompute CRC32c over header + payload, including frag list. */
	crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
	skb_walk_frags(skb, frag)
		crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
					  crc32);
	crc32 = sctp_end_cksum(crc32);
	hdr->checksum = crc32;

	return true;
}

static const struct nf_nat_protocol nf_nat_protocol_sctp = {
	.protonum		= IPPROTO_SCTP,
	.me			= THIS_MODULE,
	.manip_pkt		= sctp_manip_pkt,
	.in_range		= nf_nat_proto_in_range,
	.unique_tuple		= sctp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.range_to_nlattr	= nf_nat_proto_range_to_nlattr,
	.nlattr_to_range	= nf_nat_proto_nlattr_to_range,
#endif
};

/* Register the SCTP NAT protocol handler at module load. */
static int __init nf_nat_proto_sctp_init(void)
{
	return nf_nat_protocol_register(&nf_nat_protocol_sctp);
}

/* Unregister the handler at module unload. */
static void __exit nf_nat_proto_sctp_exit(void)
{
	nf_nat_protocol_unregister(&nf_nat_protocol_sctp);
}

module_init(nf_nat_proto_sctp_init);
module_exit(nf_nat_proto_sctp_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCTP NAT protocol helper");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
gpl-2.0
showliu/android_kernel_xiaomi_aries-1
kernel/sched/auto_group.c
3498
5746
#ifdef CONFIG_SCHED_AUTOGROUP

#include "sched.h"

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/security.h>
#include <linux/export.h>

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
/* Fallback autogroup every task belongs to until moved elsewhere. */
static struct autogroup autogroup_default;
/* Monotonic id source for /proc display of autogroups. */
static atomic_t autogroup_seq_nr;

/* Attach the boot-time init task to the default autogroup. */
void __init autogroup_init(struct task_struct *init_task)
{
	autogroup_default.tg = &root_task_group;
	kref_init(&autogroup_default.kref);
	init_rwsem(&autogroup_default.lock);
	init_task->signal->autogroup = &autogroup_default;
}

/* Release the autogroup struct hanging off a dying task group. */
void autogroup_free(struct task_group *tg)
{
	kfree(tg->autogroup);
}

/* kref release callback: tear down the backing task group. */
static inline void autogroup_destroy(struct kref *kref)
{
	struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
	/* We've redirected RT tasks to the root task group... */
	ag->tg->rt_se = NULL;
	ag->tg->rt_rq = NULL;
#endif
	sched_destroy_group(ag->tg);
}

static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}

/*
 * Take a reference on @p's autogroup under siglock; if the sighand is
 * already gone (task exiting), fall back to the default autogroup.
 */
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);

	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);

	return ag;
}

/*
 * Allocate a new autogroup with its own task group under the root.
 * On any failure, return a reference to the default autogroup instead,
 * so callers always receive a usable group.
 */
static inline struct autogroup *autogroup_create(void)
{
	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	struct task_group *tg;

	if (!ag)
		goto out_fail;

	tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		goto out_free;

	kref_init(&ag->kref);
	init_rwsem(&ag->lock);
	ag->id = atomic_inc_return(&autogroup_seq_nr);
	ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Autogroup RT tasks are redirected to the root task group
	 * so we don't have to move tasks around upon policy change,
	 * or flail around trying to allocate bandwidth on the fly.
	 * A bandwidth exception in __sched_setscheduler() allows
	 * the policy change to proceed.  Thereafter, task_group()
	 * returns &root_task_group, so zero bandwidth is required.
	 */
	free_rt_sched_group(tg);
	tg->rt_se = root_task_group.rt_se;
	tg->rt_rq = root_task_group.rt_rq;
#endif
	tg->autogroup = ag;

	return ag;

out_free:
	kfree(ag);
out_fail:
	if (printk_ratelimit()) {
		printk(KERN_WARNING "autogroup_create: %s failure.\n",
			ag ? "sched_create_group()" : "kmalloc()");
	}

	return autogroup_kref_get(&autogroup_default);
}

/*
 * Should @p be placed in its signal-struct autogroup rather than @tg?
 * Only applies at the root level, only to CFS tasks, and never to a
 * task that is already exiting.
 */
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (tg != &root_task_group)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	/*
	 * We can only assume the task group can't go away on us if
	 * autogroup_move_group() can see us on ->thread_group list.
	 */
	if (p->flags & PF_EXITING)
		return false;

	return true;
}

/*
 * Switch @p's whole thread group over to autogroup @ag under siglock,
 * dropping the reference held on the previous autogroup.  If autogroup
 * is disabled via sysctl, only the signal-struct pointer is updated and
 * the per-thread scheduler move is skipped.
 */
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);

	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
		goto out;

	t = p;
	do {
		sched_move_task(t);
	} while_each_thread(p, t);

out:
	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, cannot be called under any spinlock */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag = autogroup_create();

	autogroup_move_group(p, ag);
	/* drop extra reference added by autogroup_create() */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock.  Currently has no users */
void sched_autogroup_detach(struct task_struct *p)
{
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

/* At fork, the child signal struct inherits the parent's autogroup. */
void sched_autogroup_fork(struct signal_struct *sig)
{
	sig->autogroup = autogroup_task_get(current);
}

/* Drop the signal struct's autogroup reference at exit. */
void sched_autogroup_exit(struct signal_struct *sig)
{
	autogroup_kref_put(sig->autogroup);
}

/* "noautogroup" on the kernel command line disables the feature. */
static int __init setup_autogroup(char *str)
{
	sysctl_sched_autogroup_enabled = 0;

	return 1;
}

__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS

/*
 * Set the nice level of @p's autogroup via /proc/<pid>/autogroup.
 * Validates the nice range and permissions, then rate-limits the
 * expensive sched_group_set_shares() to roughly 10 calls per second
 * for non-CAP_SYS_ADMIN callers.
 */
int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
	static unsigned long next = INITIAL_JIFFIES;
	struct autogroup *ag;
	int err;

	if (nice < -20 || nice > 19)
		return -EINVAL;

	err = security_task_setnice(current, nice);
	if (err)
		return err;

	if (nice < 0 && !can_nice(current, nice))
		return -EPERM;

	/* this is a heavy operation taking global locks.. */
	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
		return -EAGAIN;

	next = HZ / 10 + jiffies;
	ag = autogroup_task_get(p);

	down_write(&ag->lock);
	err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
	if (!err)
		ag->nice = nice;
	up_write(&ag->lock);

	autogroup_kref_put(ag);

	return err;
}

/* Show "/autogroup-<id> nice <n>" for /proc/<pid>/autogroup. */
void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
	struct autogroup *ag = autogroup_task_get(p);

	if (!task_group_is_autogroup(ag->tg))
		goto out;

	down_read(&ag->lock);
	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
	up_read(&ag->lock);

out:
	autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SCHED_DEBUG
/* Format "/autogroup-<id>" into @buf; 0 if @tg is not an autogroup. */
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	if (!task_group_is_autogroup(tg))
		return 0;

	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */

#endif /* CONFIG_SCHED_AUTOGROUP */
gpl-2.0
kbehren/android_kernel_lenovo_msm8226
arch/sparc/kernel/leon_smp.c
4266
13712
/* leon_smp.c: Sparc-Leon SMP support. * * based on sun4m_smp.c * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB */ #include <asm/head.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/threads.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/of.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/profile.h> #include <linux/pm.h> #include <linux/delay.h> #include <linux/gfp.h> #include <linux/cpu.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/ptrace.h> #include <linux/atomic.h> #include <asm/irq_regs.h> #include <asm/traps.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/oplib.h> #include <asm/cpudata.h> #include <asm/asi.h> #include <asm/leon.h> #include <asm/leon_amba.h> #include "kernel.h" #ifdef CONFIG_SPARC_LEON #include "irq.h" extern ctxd_t *srmmu_ctx_table_phys; static int smp_processors_ready; extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern cpumask_t smp_commenced_mask; void __init leon_configure_cache_smp(void); static void leon_ipi_init(void); /* IRQ number of LEON IPIs */ int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT; static inline unsigned long do_swap(volatile unsigned long *ptr, unsigned long val) { __asm__ __volatile__("swapa [%2] %3, %0\n\t" : "=&r"(val) : "0"(val), "r"(ptr), "i"(ASI_LEON_DCACHE_MISS) : "memory"); return val; } static void smp_setup_percpu_timer(void); void __cpuinit leon_callin(void) { int cpuid = hard_smpleon_processor_id(); local_flush_cache_all(); local_flush_tlb_all(); leon_configure_cache_smp(); notify_cpu_starting(cpuid); /* Get our local ticker going. 
*/ smp_setup_percpu_timer(); calibrate_delay(); smp_store_cpu_info(cpuid); local_flush_cache_all(); local_flush_tlb_all(); /* * Unblock the master CPU _only_ when the scheduler state * of all secondary CPUs will be up-to-date, so after * the SMP initialization the master will be just allowed * to call the scheduler code. * Allow master to continue. */ do_swap(&cpu_callin_map[cpuid], 1); local_flush_cache_all(); local_flush_tlb_all(); /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) : "memory" /* paranoid */); /* Attach to the address space of init_task. */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) mb(); local_irq_enable(); set_cpu_online(cpuid, true); } /* * Cycle through the processors asking the PROM to start each one. */ extern struct linux_prom_registers smp_penguin_ctable; void __init leon_configure_cache_smp(void) { unsigned long cfg = sparc_leon3_get_dcachecfg(); int me = smp_processor_id(); if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) { printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n", (unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg), (unsigned int)cfg, (unsigned int)me); sparc_leon3_disable_cache(); } else { if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) { sparc_leon3_enable_snooping(); } else { printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n", me); sparc_leon3_disable_cache(); } } local_flush_cache_all(); local_flush_tlb_all(); } void leon_smp_setbroadcast(unsigned int mask) { int broadcast = ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> LEON3_IRQMPSTATUS_BROADCAST) & 1); if (!broadcast) { prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp wont work !!!!! 
####### nr cpus: %d\n", leon_smp_nrcpus()); if (leon_smp_nrcpus() > 1) { BUG(); } else { prom_printf("continue anyway\n"); return; } } LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask); } unsigned int leon_smp_getbroadcast(void) { unsigned int mask; mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast)); return mask; } int leon_smp_nrcpus(void) { int nrcpu = ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1; return nrcpu; } void __init leon_boot_cpus(void) { int nrcpu = leon_smp_nrcpus(); int me = smp_processor_id(); /* Setup IPI */ leon_ipi_init(); printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me, (unsigned int)nrcpu, (unsigned int)NR_CPUS, (unsigned int)&(leon3_irqctrl_regs->mpstatus)); leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me); leon_enable_irq_cpu(LEON3_IRQ_TICKER, me); leon_enable_irq_cpu(leon_ipi_irq, me); leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER); leon_configure_cache_smp(); smp_setup_percpu_timer(); local_flush_cache_all(); } int __cpuinit leon_boot_one_cpu(int i) { struct task_struct *p; int timeout; /* Cook up an idler for this guy. */ p = fork_idle(i); current_set[i] = task_thread_info(p); /* See trampoline.S:leon_smp_cpu_startup for details... * Initialize the contexts table * Since the call to prom_startcpu() trashes the structure, * we need to re-initialize it for each cpu */ smp_penguin_ctable.which_io = 0; smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys; smp_penguin_ctable.reg_size = 0; /* whirrr, whirrr, whirrrrrrrrr... */ printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i, (unsigned int)&leon3_irqctrl_regs->mpstatus); local_flush_cache_all(); /* Make sure all IRQs are of from the start for this new CPU */ LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0); /* Wake one CPU */ LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i); /* wheee... it's going... 
*/ for (timeout = 0; timeout < 10000; timeout++) { if (cpu_callin_map[i]) break; udelay(200); } printk(KERN_INFO "Started CPU %d\n", (unsigned int)i); if (!(cpu_callin_map[i])) { printk(KERN_ERR "Processor %d is stuck.\n", i); return -ENODEV; } else { leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i); leon_enable_irq_cpu(LEON3_IRQ_TICKER, i); leon_enable_irq_cpu(leon_ipi_irq, i); } local_flush_cache_all(); return 0; } void __init leon_smp_done(void) { int i, first; int *prev; /* setup cpu list for irq rotation */ first = 0; prev = &first; for (i = 0; i < NR_CPUS; i++) { if (cpu_online(i)) { *prev = i; prev = &cpu_data(i).next; } } *prev = first; local_flush_cache_all(); /* Free unneeded trap tables */ if (!cpu_present(1)) { ClearPageReserved(virt_to_page(&trapbase_cpu1)); init_page_count(virt_to_page(&trapbase_cpu1)); free_page((unsigned long)&trapbase_cpu1); totalram_pages++; num_physpages++; } if (!cpu_present(2)) { ClearPageReserved(virt_to_page(&trapbase_cpu2)); init_page_count(virt_to_page(&trapbase_cpu2)); free_page((unsigned long)&trapbase_cpu2); totalram_pages++; num_physpages++; } if (!cpu_present(3)) { ClearPageReserved(virt_to_page(&trapbase_cpu3)); init_page_count(virt_to_page(&trapbase_cpu3)); free_page((unsigned long)&trapbase_cpu3); totalram_pages++; num_physpages++; } /* Ok, they are spinning and ready to go. */ smp_processors_ready = 1; } void leon_irq_rotate(int cpu) { } struct leon_ipi_work { int single; int msk; int resched; }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work); /* Initialize IPIs on the LEON, in order to save IRQ resources only one IRQ * is used for all three types of IPIs. 
*/ static void __init leon_ipi_init(void) { int cpu, len; struct leon_ipi_work *work; struct property *pp; struct device_node *rootnp; struct tt_entry *trap_table; unsigned long flags; /* Find IPI IRQ or stick with default value */ rootnp = of_find_node_by_path("/ambapp0"); if (rootnp) { pp = of_find_property(rootnp, "ipi_num", &len); if (pp && (*(int *)pp->value)) leon_ipi_irq = *(int *)pp->value; } printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq); /* Adjust so that we jump directly to smpleon_ipi */ local_irq_save(flags); trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)]; trap_table->inst_three += smpleon_ipi - real_irq_entry; local_flush_cache_all(); local_irq_restore(flags); for_each_possible_cpu(cpu) { work = &per_cpu(leon_ipi_work, cpu); work->single = work->msk = work->resched = 0; } } static void leon_ipi_single(int cpu) { struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); /* Mark work */ work->single = 1; /* Generate IRQ on the CPU */ set_cpu_int(cpu, leon_ipi_irq); } static void leon_ipi_mask_one(int cpu) { struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); /* Mark work */ work->msk = 1; /* Generate IRQ on the CPU */ set_cpu_int(cpu, leon_ipi_irq); } static void leon_ipi_resched(int cpu) { struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); /* Mark work */ work->resched = 1; /* Generate IRQ on the CPU (any IRQ will cause resched) */ set_cpu_int(cpu, leon_ipi_irq); } void leonsmp_ipi_interrupt(void) { struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work); if (work->single) { work->single = 0; smp_call_function_single_interrupt(); } if (work->msk) { work->msk = 0; smp_call_function_interrupt(); } if (work->resched) { work->resched = 0; smp_resched_interrupt(); } } static struct smp_funcall { smpfunc_t func; unsigned long arg1; unsigned long arg2; unsigned long arg3; unsigned long arg4; unsigned long arg5; unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. 
*/ unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */ } ccall_info; static DEFINE_SPINLOCK(cross_call_lock); /* Cross calls must be serialized, at least currently. */ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { if (smp_processors_ready) { register int high = NR_CPUS - 1; unsigned long flags; spin_lock_irqsave(&cross_call_lock, flags); { /* If you make changes here, make sure gcc generates proper code... */ register smpfunc_t f asm("i0") = func; register unsigned long a1 asm("i1") = arg1; register unsigned long a2 asm("i2") = arg2; register unsigned long a3 asm("i3") = arg3; register unsigned long a4 asm("i4") = arg4; register unsigned long a5 asm("i5") = 0; __asm__ __volatile__("std %0, [%6]\n\t" "std %2, [%6 + 8]\n\t" "std %4, [%6 + 16]\n\t" : : "r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5), "r"(&ccall_info.func)); } /* Init receive/complete mapping, plus fire the IPI's off. */ { register int i; cpumask_clear_cpu(smp_processor_id(), &mask); cpumask_and(&mask, cpu_online_mask, &mask); for (i = 0; i <= high; i++) { if (cpumask_test_cpu(i, &mask)) { ccall_info.processors_in[i] = 0; ccall_info.processors_out[i] = 0; set_cpu_int(i, LEON3_IRQ_CROSS_CALL); } } } { register int i; i = 0; do { if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_in[i]) barrier(); } while (++i <= high); i = 0; do { if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_out[i]) barrier(); } while (++i <= high); } spin_unlock_irqrestore(&cross_call_lock, flags); } } /* Running cross calls. 
*/ void leon_cross_call_irq(void) { int i = smp_processor_id(); ccall_info.processors_in[i] = 1; ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4, ccall_info.arg5); ccall_info.processors_out[i] = 1; } irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused) { int cpu = smp_processor_id(); leon_clear_profile_irq(cpu); profile_tick(CPU_PROFILING); if (!--prof_counter(cpu)) { int user = user_mode(get_irq_regs()); update_process_times(user); prof_counter(cpu) = prof_multiplier(cpu); } return IRQ_HANDLED; } static void __init smp_setup_percpu_timer(void) { int cpu = smp_processor_id(); prof_counter(cpu) = prof_multiplier(cpu) = 1; } void __init leon_blackbox_id(unsigned *addr) { int rd = *addr & 0x3e000000; int rs1 = rd >> 11; /* patch places where ___b_hard_smp_processor_id appears */ addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ addr[1] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ addr[2] = 0x01000000; /* nop */ } void __init leon_blackbox_current(unsigned *addr) { int rd = *addr & 0x3e000000; int rs1 = rd >> 11; /* patch LOAD_CURRENT macro where ___b_load_current appears */ addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ addr[2] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ addr[4] = 0x81282002 | rd | rs1; /* sll reg, 0x2, reg */ } void __init leon_init_smp(void) { /* Patch ipi15 trap table */ t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m); BTFIXUPSET_BLACKBOX(hard_smp_processor_id, leon_blackbox_id); BTFIXUPSET_BLACKBOX(load_current, leon_blackbox_current); BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(smp_ipi_resched, leon_ipi_resched, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(smp_ipi_single, leon_ipi_single, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(smp_ipi_mask_one, leon_ipi_mask_one, BTFIXUPCALL_NORM); } #endif /* CONFIG_SPARC_LEON */
gpl-2.0
ntrdma/ntrdma
drivers/pinctrl/nomadik/pinctrl-ab8540.c
4522
19085
/* * Copyright (C) ST-Ericsson SA 2012 * * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/pinctrl/pinctrl.h> #include <linux/mfd/abx500/ab8500.h> #include "pinctrl-abx500.h" /* All the pins that can be used for GPIO and some other functions */ #define ABX500_GPIO(offset) (offset) #define AB8540_PIN_J16 ABX500_GPIO(1) #define AB8540_PIN_D17 ABX500_GPIO(2) #define AB8540_PIN_C12 ABX500_GPIO(3) #define AB8540_PIN_G12 ABX500_GPIO(4) /* hole */ #define AB8540_PIN_D16 ABX500_GPIO(14) #define AB8540_PIN_F15 ABX500_GPIO(15) #define AB8540_PIN_J8 ABX500_GPIO(16) #define AB8540_PIN_K16 ABX500_GPIO(17) #define AB8540_PIN_G15 ABX500_GPIO(18) #define AB8540_PIN_F17 ABX500_GPIO(19) #define AB8540_PIN_E17 ABX500_GPIO(20) /* hole */ #define AB8540_PIN_AA16 ABX500_GPIO(27) #define AB8540_PIN_W18 ABX500_GPIO(28) #define AB8540_PIN_Y15 ABX500_GPIO(29) #define AB8540_PIN_W16 ABX500_GPIO(30) #define AB8540_PIN_V15 ABX500_GPIO(31) #define AB8540_PIN_W17 ABX500_GPIO(32) /* hole */ #define AB8540_PIN_D12 ABX500_GPIO(42) #define AB8540_PIN_P4 ABX500_GPIO(43) #define AB8540_PIN_AB1 ABX500_GPIO(44) #define AB8540_PIN_K7 ABX500_GPIO(45) #define AB8540_PIN_L7 ABX500_GPIO(46) #define AB8540_PIN_G10 ABX500_GPIO(47) #define AB8540_PIN_K12 ABX500_GPIO(48) /* hole */ #define AB8540_PIN_N8 ABX500_GPIO(51) #define AB8540_PIN_P12 ABX500_GPIO(52) #define AB8540_PIN_K8 ABX500_GPIO(53) #define AB8540_PIN_J11 ABX500_GPIO(54) #define AB8540_PIN_AC2 ABX500_GPIO(55) #define AB8540_PIN_AB2 ABX500_GPIO(56) /* indicates the highest GPIO number */ #define AB8540_GPIO_MAX_NUMBER 56 /* * The names of the pins are denoted by GPIO number and ball name, even * though they can be used for other things than GPIO, this is the first * column in 
the table of the data sheet and often used on schematics and * such. */ static const struct pinctrl_pin_desc ab8540_pins[] = { PINCTRL_PIN(AB8540_PIN_J16, "GPIO1_J16"), PINCTRL_PIN(AB8540_PIN_D17, "GPIO2_D17"), PINCTRL_PIN(AB8540_PIN_C12, "GPIO3_C12"), PINCTRL_PIN(AB8540_PIN_G12, "GPIO4_G12"), /* hole */ PINCTRL_PIN(AB8540_PIN_D16, "GPIO14_D16"), PINCTRL_PIN(AB8540_PIN_F15, "GPIO15_F15"), PINCTRL_PIN(AB8540_PIN_J8, "GPIO16_J8"), PINCTRL_PIN(AB8540_PIN_K16, "GPIO17_K16"), PINCTRL_PIN(AB8540_PIN_G15, "GPIO18_G15"), PINCTRL_PIN(AB8540_PIN_F17, "GPIO19_F17"), PINCTRL_PIN(AB8540_PIN_E17, "GPIO20_E17"), /* hole */ PINCTRL_PIN(AB8540_PIN_AA16, "GPIO27_AA16"), PINCTRL_PIN(AB8540_PIN_W18, "GPIO28_W18"), PINCTRL_PIN(AB8540_PIN_Y15, "GPIO29_Y15"), PINCTRL_PIN(AB8540_PIN_W16, "GPIO30_W16"), PINCTRL_PIN(AB8540_PIN_V15, "GPIO31_V15"), PINCTRL_PIN(AB8540_PIN_W17, "GPIO32_W17"), /* hole */ PINCTRL_PIN(AB8540_PIN_D12, "GPIO42_D12"), PINCTRL_PIN(AB8540_PIN_P4, "GPIO43_P4"), PINCTRL_PIN(AB8540_PIN_AB1, "GPIO44_AB1"), PINCTRL_PIN(AB8540_PIN_K7, "GPIO45_K7"), PINCTRL_PIN(AB8540_PIN_L7, "GPIO46_L7"), PINCTRL_PIN(AB8540_PIN_G10, "GPIO47_G10"), PINCTRL_PIN(AB8540_PIN_K12, "GPIO48_K12"), /* hole */ PINCTRL_PIN(AB8540_PIN_N8, "GPIO51_N8"), PINCTRL_PIN(AB8540_PIN_P12, "GPIO52_P12"), PINCTRL_PIN(AB8540_PIN_K8, "GPIO53_K8"), PINCTRL_PIN(AB8540_PIN_J11, "GPIO54_J11"), PINCTRL_PIN(AB8540_PIN_AC2, "GPIO55_AC2"), PINCTRL_PIN(AB8540_PIN_AB2, "GPIO56_AB2"), }; /* * Maps local GPIO offsets to local pin numbers */ static const struct abx500_pinrange ab8540_pinranges[] = { ABX500_PINRANGE(1, 4, ABX500_ALT_A), ABX500_PINRANGE(14, 7, ABX500_ALT_A), ABX500_PINRANGE(27, 6, ABX500_ALT_A), ABX500_PINRANGE(42, 7, ABX500_ALT_A), ABX500_PINRANGE(51, 6, ABX500_ALT_A), }; /* * Read the pin group names like this: * sysclkreq2_d_1 = first groups of pins for sysclkreq2 on default function * * The groups are arranged as sets per altfunction column, so we can * mux in one group at a time by selecting the same 
altfunction for them * all. When functions require pins on different altfunctions, you need * to combine several groups. */ /* default column */ static const unsigned sysclkreq2_d_1_pins[] = { AB8540_PIN_J16 }; static const unsigned sysclkreq3_d_1_pins[] = { AB8540_PIN_D17 }; static const unsigned sysclkreq4_d_1_pins[] = { AB8540_PIN_C12 }; static const unsigned sysclkreq6_d_1_pins[] = { AB8540_PIN_G12 }; static const unsigned pwmout1_d_1_pins[] = { AB8540_PIN_D16 }; static const unsigned pwmout2_d_1_pins[] = { AB8540_PIN_F15 }; static const unsigned pwmout3_d_1_pins[] = { AB8540_PIN_J8 }; /* audio data interface 1*/ static const unsigned adi1_d_1_pins[] = { AB8540_PIN_K16, AB8540_PIN_G15, AB8540_PIN_F17, AB8540_PIN_E17 }; /* Digital microphone 1 and 2 */ static const unsigned dmic12_d_1_pins[] = { AB8540_PIN_AA16, AB8540_PIN_W18 }; /* Digital microphone 3 and 4 */ static const unsigned dmic34_d_1_pins[] = { AB8540_PIN_Y15, AB8540_PIN_W16 }; /* Digital microphone 5 and 6 */ static const unsigned dmic56_d_1_pins[] = { AB8540_PIN_V15, AB8540_PIN_W17 }; static const unsigned sysclkreq5_d_1_pins[] = { AB8540_PIN_D12 }; static const unsigned batremn_d_1_pins[] = { AB8540_PIN_P4 }; static const unsigned service_d_1_pins[] = { AB8540_PIN_AB1 }; static const unsigned pwrctrl0_d_1_pins[] = { AB8540_PIN_K7 }; static const unsigned pwrctrl1_d_1_pins[] = { AB8540_PIN_L7 }; static const unsigned pwmextvibra1_d_1_pins[] = { AB8540_PIN_G10 }; static const unsigned pwmextvibra2_d_1_pins[] = { AB8540_PIN_K12 }; static const unsigned gpio1_vbat_d_1_pins[] = { AB8540_PIN_N8 }; static const unsigned gpio2_vbat_d_1_pins[] = { AB8540_PIN_P12 }; static const unsigned gpio3_vbat_d_1_pins[] = { AB8540_PIN_K8 }; static const unsigned gpio4_vbat_d_1_pins[] = { AB8540_PIN_J11 }; static const unsigned pdmclkdat_d_1_pins[] = { AB8540_PIN_AC2, AB8540_PIN_AB2 }; /* Altfunction A column */ static const unsigned gpio1_a_1_pins[] = { AB8540_PIN_J16 }; static const unsigned gpio2_a_1_pins[] = { 
AB8540_PIN_D17 }; static const unsigned gpio3_a_1_pins[] = { AB8540_PIN_C12 }; static const unsigned gpio4_a_1_pins[] = { AB8540_PIN_G12 }; static const unsigned gpio14_a_1_pins[] = { AB8540_PIN_D16 }; static const unsigned gpio15_a_1_pins[] = { AB8540_PIN_F15 }; static const unsigned gpio16_a_1_pins[] = { AB8540_PIN_J8 }; static const unsigned gpio17_a_1_pins[] = { AB8540_PIN_K16 }; static const unsigned gpio18_a_1_pins[] = { AB8540_PIN_G15 }; static const unsigned gpio19_a_1_pins[] = { AB8540_PIN_F17 }; static const unsigned gpio20_a_1_pins[] = { AB8540_PIN_E17 }; static const unsigned gpio27_a_1_pins[] = { AB8540_PIN_AA16 }; static const unsigned gpio28_a_1_pins[] = { AB8540_PIN_W18 }; static const unsigned gpio29_a_1_pins[] = { AB8540_PIN_Y15 }; static const unsigned gpio30_a_1_pins[] = { AB8540_PIN_W16 }; static const unsigned gpio31_a_1_pins[] = { AB8540_PIN_V15 }; static const unsigned gpio32_a_1_pins[] = { AB8540_PIN_W17 }; static const unsigned gpio42_a_1_pins[] = { AB8540_PIN_D12 }; static const unsigned gpio43_a_1_pins[] = { AB8540_PIN_P4 }; static const unsigned gpio44_a_1_pins[] = { AB8540_PIN_AB1 }; static const unsigned gpio45_a_1_pins[] = { AB8540_PIN_K7 }; static const unsigned gpio46_a_1_pins[] = { AB8540_PIN_L7 }; static const unsigned gpio47_a_1_pins[] = { AB8540_PIN_G10 }; static const unsigned gpio48_a_1_pins[] = { AB8540_PIN_K12 }; static const unsigned gpio51_a_1_pins[] = { AB8540_PIN_N8 }; static const unsigned gpio52_a_1_pins[] = { AB8540_PIN_P12 }; static const unsigned gpio53_a_1_pins[] = { AB8540_PIN_K8 }; static const unsigned gpio54_a_1_pins[] = { AB8540_PIN_J11 }; static const unsigned gpio55_a_1_pins[] = { AB8540_PIN_AC2 }; static const unsigned gpio56_a_1_pins[] = { AB8540_PIN_AB2 }; #define AB8540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \ .npins = ARRAY_SIZE(a##_pins), .altsetting = b } static const struct abx500_pingroup ab8540_groups[] = { /* default column */ AB8540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT), 
AB8540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(service_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pwrctrl0_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pwrctrl1_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pwmextvibra1_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pwmextvibra2_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(gpio1_vbat_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(gpio2_vbat_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(gpio3_vbat_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(gpio4_vbat_d_1, ABX500_DEFAULT), AB8540_PIN_GROUP(pdmclkdat_d_1, ABX500_DEFAULT), /* Altfunction A column */ AB8540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio43_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio44_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio45_a_1, 
ABX500_ALT_A), AB8540_PIN_GROUP(gpio46_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio47_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio48_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio54_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio55_a_1, ABX500_ALT_A), AB8540_PIN_GROUP(gpio56_a_1, ABX500_ALT_A), }; /* We use this macro to define the groups applicable to a function */ #define AB8540_FUNC_GROUPS(a, b...) \ static const char * const a##_groups[] = { b }; AB8540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1", "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1"); AB8540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1", "gpio14_a_1", "gpio15_a_1", "gpio16_a_1", "gpio17_a_1", "gpio18_a_1", "gpio19_a_1", "gpio20_a_1", "gpio27_a_1", "gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1", "gpio32_a_1", "gpio42_a_1", "gpio43_a_1", "gpio44_a_1", "gpio45_a_1", "gpio46_a_1", "gpio47_a_1", "gpio48_a_1", "gpio51_a_1", "gpio52_a_1", "gpio53_a_1", "gpio54_a_1", "gpio55_a_1", "gpio56_a_1"); AB8540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1"); AB8540_FUNC_GROUPS(adi1, "adi1_d_1"); AB8540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1"); AB8540_FUNC_GROUPS(batremn, "batremn_d_1"); AB8540_FUNC_GROUPS(service, "service_d_1"); AB8540_FUNC_GROUPS(pwrctrl, "pwrctrl0_d_1", "pwrctrl1_d_1"); AB8540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_d_1", "pwmextvibra2_d_1"); AB8540_FUNC_GROUPS(gpio_vbat, "gpio1_vbat_d_1", "gpio2_vbat_d_1", "gpio3_vbat_d_1", "gpio4_vbat_d_1"); AB8540_FUNC_GROUPS(pdm, "pdmclkdat_d_1"); #define FUNCTION(fname) \ { \ .name = #fname, \ .groups = fname##_groups, \ .ngroups = ARRAY_SIZE(fname##_groups), \ } static const struct abx500_function ab8540_functions[] = { FUNCTION(sysclkreq), FUNCTION(gpio), FUNCTION(pwmout), FUNCTION(adi1), FUNCTION(dmic), FUNCTION(batremn), FUNCTION(service), 
FUNCTION(pwrctrl), FUNCTION(pwmextvibra), FUNCTION(gpio_vbat), FUNCTION(pdm), }; /* * this table translates what's is in the AB8540 specification regarding the * balls alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C). * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1, * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val), * AB8540 only supports DEFAULT and ALTA functions, so ALTERNATFUNC * registers is not used * */ static struct alternate_functions ab8540_alternate_functions[AB8540_GPIO_MAX_NUMBER + 1] = { /* GPIOSEL1 - bit 4-7 reserved */ ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */ ALTERNATE_FUNCTIONS(1, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */ ALTERNATE_FUNCTIONS(2, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */ ALTERNATE_FUNCTIONS(3, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/ ALTERNATE_FUNCTIONS(4, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3*/ ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */ ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */ ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */ ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */ /* GPIOSEL2 - bit 0-4 reserved */ ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */ ALTERNATE_FUNCTIONS(10, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO10 */ ALTERNATE_FUNCTIONS(11, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO11 */ ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */ ALTERNATE_FUNCTIONS(13, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO13 */ ALTERNATE_FUNCTIONS(14, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */ ALTERNATE_FUNCTIONS(15, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */ ALTERNATE_FUNCTIONS(16, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */ /* GPIOSEL3 - bit 4-7 
reserved */ ALTERNATE_FUNCTIONS(17, 0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */ ALTERNATE_FUNCTIONS(18, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 1 */ ALTERNATE_FUNCTIONS(19, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 2 */ ALTERNATE_FUNCTIONS(20, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 3 */ ALTERNATE_FUNCTIONS(21, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO21 */ ALTERNATE_FUNCTIONS(22, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO22 */ ALTERNATE_FUNCTIONS(23, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO23 */ ALTERNATE_FUNCTIONS(24, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO24 */ /* GPIOSEL4 - bit 0-1 reserved */ ALTERNATE_FUNCTIONS(25, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO25 */ ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */ ALTERNATE_FUNCTIONS(27, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */ ALTERNATE_FUNCTIONS(28, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */ ALTERNATE_FUNCTIONS(29, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */ ALTERNATE_FUNCTIONS(30, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */ ALTERNATE_FUNCTIONS(31, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */ ALTERNATE_FUNCTIONS(32, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */ /* GPIOSEL5 - bit 0-7 reserved */ ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */ ALTERNATE_FUNCTIONS(34, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO34 */ ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */ ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */ ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */ ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */ ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */ ALTERNATE_FUNCTIONS(40, UNUSED, 
UNUSED, UNUSED, 0, 0, 0), /* no GPIO40 */ /* GPIOSEL6 - bit 0 reserved */ ALTERNATE_FUNCTIONS(41, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO41 */ ALTERNATE_FUNCTIONS(42, 1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */ ALTERNATE_FUNCTIONS(43, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO43, altA controlled by bit 2 */ ALTERNATE_FUNCTIONS(44, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO44, altA controlled by bit 3 */ ALTERNATE_FUNCTIONS(45, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO45, altA controlled by bit 4 */ ALTERNATE_FUNCTIONS(46, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO46, altA controlled by bit 5 */ ALTERNATE_FUNCTIONS(47, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO47, altA controlled by bit 6 */ ALTERNATE_FUNCTIONS(48, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO48, altA controlled by bit 7 */ /* GPIOSEL7 - bit 0-1 reserved */ ALTERNATE_FUNCTIONS(49, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */ ALTERNATE_FUNCTIONS(50, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO50 */ ALTERNATE_FUNCTIONS(51, 2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */ ALTERNATE_FUNCTIONS(52, 3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */ ALTERNATE_FUNCTIONS(53, 4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */ ALTERNATE_FUNCTIONS(54, 5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54, altA controlled by bit 5 */ ALTERNATE_FUNCTIONS(55, 6, UNUSED, UNUSED, 0, 0, 0), /* GPIO55, altA controlled by bit 6 */ ALTERNATE_FUNCTIONS(56, 7, UNUSED, UNUSED, 0, 0, 0), /* GPIO56, altA controlled by bit 7 */ }; static struct pullud ab8540_pullud = { .first_pin = 51, /* GPIO1_VBAT */ .last_pin = 54, /* GPIO4_VBAT */ }; /* * For AB8540 Only some GPIOs are interrupt capable: * GPIO43 to GPIO44 * GPIO51 to GPIO54 */ static struct abx500_gpio_irq_cluster ab8540_gpio_irq_cluster[] = { GPIO_IRQ_CLUSTER(43, 43, AB8540_INT_GPIO43F), GPIO_IRQ_CLUSTER(44, 44, AB8540_INT_GPIO44F), GPIO_IRQ_CLUSTER(51, 54, AB9540_INT_GPIO51R), }; static struct abx500_pinctrl_soc_data ab8540_soc = { 
.gpio_ranges = ab8540_pinranges, .gpio_num_ranges = ARRAY_SIZE(ab8540_pinranges), .pins = ab8540_pins, .npins = ARRAY_SIZE(ab8540_pins), .functions = ab8540_functions, .nfunctions = ARRAY_SIZE(ab8540_functions), .groups = ab8540_groups, .ngroups = ARRAY_SIZE(ab8540_groups), .alternate_functions = ab8540_alternate_functions, .pullud = &ab8540_pullud, .gpio_irq_cluster = ab8540_gpio_irq_cluster, .ngpio_irq_cluster = ARRAY_SIZE(ab8540_gpio_irq_cluster), .irq_gpio_rising_offset = AB8540_INT_GPIO43R, .irq_gpio_falling_offset = AB8540_INT_GPIO43F, .irq_gpio_factor = 2, }; void abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc) { *soc = &ab8540_soc; }
gpl-2.0
tarunkapadia93/gk_armani
arch/mips/cavium-octeon/executive/cvmx-helper-util.c
4778
12643
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * Small helper utilities. 
*/ #include <linux/kernel.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-config.h> #include <asm/octeon/cvmx-fpa.h> #include <asm/octeon/cvmx-pip.h> #include <asm/octeon/cvmx-pko.h> #include <asm/octeon/cvmx-ipd.h> #include <asm/octeon/cvmx-spi.h> #include <asm/octeon/cvmx-helper.h> #include <asm/octeon/cvmx-helper-util.h> #include <asm/octeon/cvmx-ipd-defs.h> /** * Convert a interface mode into a human readable string * * @mode: Mode to convert * * Returns String */ const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode) { switch (mode) { case CVMX_HELPER_INTERFACE_MODE_DISABLED: return "DISABLED"; case CVMX_HELPER_INTERFACE_MODE_RGMII: return "RGMII"; case CVMX_HELPER_INTERFACE_MODE_GMII: return "GMII"; case CVMX_HELPER_INTERFACE_MODE_SPI: return "SPI"; case CVMX_HELPER_INTERFACE_MODE_PCIE: return "PCIE"; case CVMX_HELPER_INTERFACE_MODE_XAUI: return "XAUI"; case CVMX_HELPER_INTERFACE_MODE_SGMII: return "SGMII"; case CVMX_HELPER_INTERFACE_MODE_PICMG: return "PICMG"; case CVMX_HELPER_INTERFACE_MODE_NPI: return "NPI"; case CVMX_HELPER_INTERFACE_MODE_LOOP: return "LOOP"; } return "UNKNOWN"; } /** * Debug routine to dump the packet structure to the console * * @work: Work queue entry containing the packet to dump * Returns */ int cvmx_helper_dump_packet(cvmx_wqe_t *work) { uint64_t count; uint64_t remaining_bytes; union cvmx_buf_ptr buffer_ptr; uint64_t start_of_buffer; uint8_t *data_address; uint8_t *end_of_data; cvmx_dprintf("Packet Length: %u\n", work->len); cvmx_dprintf(" Input Port: %u\n", work->ipprt); cvmx_dprintf(" QoS: %u\n", work->qos); cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs); if (work->word2.s.bufs == 0) { union cvmx_ipd_wqe_fpa_queue wqe_pool; wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE); buffer_ptr.u64 = 0; buffer_ptr.s.pool = wqe_pool.s.wqe_pool; buffer_ptr.s.size = 128; buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data); if (likely(!work->word2.s.not_IP)) { union cvmx_pip_ip_offset 
pip_ip_offset; pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET); buffer_ptr.s.addr += (pip_ip_offset.s.offset << 3) - work->word2.s.ip_offset; buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2; } else { /* * WARNING: This code assumes that the packet * is not RAW. If it was, we would use * PIP_GBL_CFG[RAW_SHF] instead of * PIP_GBL_CFG[NIP_SHF]. */ union cvmx_pip_gbl_cfg pip_gbl_cfg; pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG); buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf; } } else buffer_ptr = work->packet_ptr; remaining_bytes = work->len; while (remaining_bytes) { start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7; cvmx_dprintf(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer); cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i); cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back); cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool); cvmx_dprintf(" Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr); cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size); cvmx_dprintf("\t\t"); data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr); end_of_data = data_address + buffer_ptr.s.size; count = 0; while (data_address < end_of_data) { if (remaining_bytes == 0) break; else remaining_bytes--; cvmx_dprintf("%02x", (unsigned int)*data_address); data_address++; if (remaining_bytes && (count == 7)) { cvmx_dprintf("\n\t\t"); count = 0; } else count++; } cvmx_dprintf("\n"); if (remaining_bytes) buffer_ptr = *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8); } return 0; } /** * Setup Random Early Drop on a specific input queue * * @queue: Input queue to setup RED on (0-7) * @pass_thresh: * Packets will begin slowly dropping when there are less than * this many packet buffers free in FPA 0. * @drop_thresh: * All incomming packets will be dropped when there are less * than this many free packet buffers in FPA 0. * Returns Zero on success. 
Negative on failure */ int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh) { union cvmx_ipd_qosx_red_marks red_marks; union cvmx_ipd_red_quex_param red_param; /* Set RED to begin dropping packets when there are pass_thresh buffers left. It will linearly drop more packets until reaching drop_thresh buffers */ red_marks.u64 = 0; red_marks.s.drop = drop_thresh; red_marks.s.pass = pass_thresh; cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64); /* Use the actual queue 0 counter, not the average */ red_param.u64 = 0; red_param.s.prb_con = (255ul << 24) / (red_marks.s.pass - red_marks.s.drop); red_param.s.avg_con = 1; red_param.s.new_con = 255; red_param.s.use_pcnt = 1; cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64); return 0; } /** * Setup Random Early Drop to automatically begin dropping packets. * * @pass_thresh: * Packets will begin slowly dropping when there are less than * this many packet buffers free in FPA 0. * @drop_thresh: * All incomming packets will be dropped when there are less * than this many free packet buffers in FPA 0. * Returns Zero on success. Negative on failure */ int cvmx_helper_setup_red(int pass_thresh, int drop_thresh) { union cvmx_ipd_portx_bp_page_cnt page_cnt; union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end; union cvmx_ipd_red_port_enable red_port_enable; int queue; int interface; int port; /* Disable backpressure based on queued buffers. It needs SW support */ page_cnt.u64 = 0; page_cnt.s.bp_enb = 0; page_cnt.s.page_cnt = 100; for (interface = 0; interface < 2; interface++) { for (port = cvmx_helper_get_first_ipd_port(interface); port < cvmx_helper_get_last_ipd_port(interface); port++) cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port), page_cnt.u64); } for (queue = 0; queue < 8; queue++) cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh); /* Shutoff the dropping based on the per port page count. 
SW isn't decrementing it right now */ ipd_bp_prt_red_end.u64 = 0; ipd_bp_prt_red_end.s.prt_enb = 0; cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64); red_port_enable.u64 = 0; red_port_enable.s.prt_enb = 0xfffffffffull; red_port_enable.s.avg_dly = 10000; red_port_enable.s.prb_dly = 10000; cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64); return 0; } /** * Setup the common GMX settings that determine the number of * ports. These setting apply to almost all configurations of all * chips. * * @interface: Interface to configure * @num_ports: Number of ports on the interface * * Returns Zero on success, negative on failure */ int __cvmx_helper_setup_gmx(int interface, int num_ports) { union cvmx_gmxx_tx_prts gmx_tx_prts; union cvmx_gmxx_rx_prts gmx_rx_prts; union cvmx_pko_reg_gmx_port_mode pko_mode; union cvmx_gmxx_txx_thresh gmx_tx_thresh; int index; /* Tell GMX the number of TX ports on this interface */ gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface)); gmx_tx_prts.s.prts = num_ports; cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64); /* Tell GMX the number of RX ports on this interface. 
This only ** applies to *GMII and XAUI ports */ if (cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_RGMII || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_SGMII || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_GMII || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_XAUI) { if (num_ports > 4) { cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal " "num_ports\n"); return -1; } gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface)); gmx_rx_prts.s.prts = num_ports; cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64); } /* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */ if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX) && !OCTEON_IS_MODEL(OCTEON_CN50XX)) { /* Tell PKO the number of ports on this interface */ pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE); if (interface == 0) { if (num_ports == 1) pko_mode.s.mode0 = 4; else if (num_ports == 2) pko_mode.s.mode0 = 3; else if (num_ports <= 4) pko_mode.s.mode0 = 2; else if (num_ports <= 8) pko_mode.s.mode0 = 1; else pko_mode.s.mode0 = 0; } else { if (num_ports == 1) pko_mode.s.mode1 = 4; else if (num_ports == 2) pko_mode.s.mode1 = 3; else if (num_ports <= 4) pko_mode.s.mode1 = 2; else if (num_ports <= 8) pko_mode.s.mode1 = 1; else pko_mode.s.mode1 = 0; } cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64); } /* * Set GMX to buffer as much data as possible before starting * transmit. This reduces the chances that we have a TX under * run due to memory contention. Any packet that fits entirely * in the GMX FIFO can never have an under run regardless of * memory load. 
*/ gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface)); if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) { /* These chips have a fixed max threshold of 0x40 */ gmx_tx_thresh.s.cnt = 0x40; } else { /* Choose the max value for the number of ports */ if (num_ports <= 1) gmx_tx_thresh.s.cnt = 0x100 / 1; else if (num_ports == 2) gmx_tx_thresh.s.cnt = 0x100 / 2; else gmx_tx_thresh.s.cnt = 0x100 / 4; } /* * SPI and XAUI can have lots of ports but the GMX hardware * only ever has a max of 4. */ if (num_ports > 4) num_ports = 4; for (index = 0; index < num_ports; index++) cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface), gmx_tx_thresh.u64); return 0; } /** * Returns the IPD/PKO port number for a port on the given * interface. * * @interface: Interface to use * @port: Port on the interface * * Returns IPD/PKO port number */ int cvmx_helper_get_ipd_port(int interface, int port) { switch (interface) { case 0: return port; case 1: return port + 16; case 2: return port + 32; case 3: return port + 36; } return -1; } /** * Returns the interface number for an IPD/PKO port number. * * @ipd_port: IPD/PKO port number * * Returns Interface number */ int cvmx_helper_get_interface_num(int ipd_port) { if (ipd_port < 16) return 0; else if (ipd_port < 32) return 1; else if (ipd_port < 36) return 2; else if (ipd_port < 40) return 3; else cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD " "port number\n"); return -1; } /** * Returns the interface index number for an IPD/PKO port * number. * * @ipd_port: IPD/PKO port number * * Returns Interface index number */ int cvmx_helper_get_interface_index_num(int ipd_port) { if (ipd_port < 32) return ipd_port & 15; else if (ipd_port < 36) return ipd_port & 3; else if (ipd_port < 40) return ipd_port & 3; else cvmx_dprintf("cvmx_helper_get_interface_index_num: " "Illegal IPD port number\n"); return -1; }
gpl-2.0
jamison904/T999_TW_kernel
arch/m68k/sun3/intersil.c
4778
1746
/* * arch/m68k/sun3/intersil.c * * basic routines for accessing the intersil clock within the sun3 machines * * started 11/12/1999 Sam Creasey * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/rtc.h> #include <asm/errno.h> #include <asm/system.h> #include <asm/rtc.h> #include <asm/intersil.h> /* bits to set for start/run of the intersil */ #define STOP_VAL (INTERSIL_STOP | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE) #define START_VAL (INTERSIL_RUN | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE) /* does this need to be implemented? */ unsigned long sun3_gettimeoffset(void) { return 1; } /* get/set hwclock */ int sun3_hwclk(int set, struct rtc_time *t) { volatile struct intersil_dt *todintersil; unsigned long flags; todintersil = (struct intersil_dt *) &intersil_clock->counter; local_irq_save(flags); intersil_clock->cmd_reg = STOP_VAL; /* set or read the clock */ if(set) { todintersil->csec = 0; todintersil->hour = t->tm_hour; todintersil->minute = t->tm_min; todintersil->second = t->tm_sec; todintersil->month = t->tm_mon; todintersil->day = t->tm_mday; todintersil->year = t->tm_year - 68; todintersil->weekday = t->tm_wday; } else { /* read clock */ t->tm_sec = todintersil->csec; t->tm_hour = todintersil->hour; t->tm_min = todintersil->minute; t->tm_sec = todintersil->second; t->tm_mon = todintersil->month; t->tm_mday = todintersil->day; t->tm_year = todintersil->year + 68; t->tm_wday = todintersil->weekday; } intersil_clock->cmd_reg = START_VAL; local_irq_restore(flags); return 0; }
gpl-2.0
SystemTera/SystemTera.Server-V-3.2-Kernel
drivers/rtc/rtc-at32ap700x.c
5802
7505
/* * An RTC driver for the AVR32 AT32AP700x processor series. * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/rtc.h> #include <linux/io.h> /* * This is a bare-bones RTC. It runs during most system sleep states, but has * no battery backup and gets reset during system restart. It must be * initialized from an external clock (network, I2C, etc) before it can be of * much use. * * The alarm functionality is limited by the hardware, not supporting * periodic interrupts. */ #define RTC_CTRL 0x00 #define RTC_CTRL_EN 0 #define RTC_CTRL_PCLR 1 #define RTC_CTRL_TOPEN 2 #define RTC_CTRL_PSEL 8 #define RTC_VAL 0x04 #define RTC_TOP 0x08 #define RTC_IER 0x10 #define RTC_IER_TOPI 0 #define RTC_IDR 0x14 #define RTC_IDR_TOPI 0 #define RTC_IMR 0x18 #define RTC_IMR_TOPI 0 #define RTC_ISR 0x1c #define RTC_ISR_TOPI 0 #define RTC_ICR 0x20 #define RTC_ICR_TOPI 0 #define RTC_BIT(name) (1 << RTC_##name) #define RTC_BF(name, value) ((value) << RTC_##name) #define rtc_readl(dev, reg) \ __raw_readl((dev)->regs + RTC_##reg) #define rtc_writel(dev, reg, value) \ __raw_writel((value), (dev)->regs + RTC_##reg) struct rtc_at32ap700x { struct rtc_device *rtc; void __iomem *regs; unsigned long alarm_time; unsigned long irq; /* Protect against concurrent register access. 
*/ spinlock_t lock; }; static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); unsigned long now; now = rtc_readl(rtc, VAL); rtc_time_to_tm(now, tm); return 0; } static int at32_rtc_settime(struct device *dev, struct rtc_time *tm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); unsigned long now; int ret; ret = rtc_tm_to_time(tm, &now); if (ret == 0) rtc_writel(rtc, VAL, now); return ret; } static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); spin_lock_irq(&rtc->lock); rtc_time_to_tm(rtc->alarm_time, &alrm->time); alrm->enabled = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0; alrm->pending = rtc_readl(rtc, ISR) & RTC_BIT(ISR_TOPI) ? 1 : 0; spin_unlock_irq(&rtc->lock); return 0; } static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); unsigned long rtc_unix_time; unsigned long alarm_unix_time; int ret; rtc_unix_time = rtc_readl(rtc, VAL); ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time); if (ret) return ret; if (alarm_unix_time < rtc_unix_time) return -EINVAL; spin_lock_irq(&rtc->lock); rtc->alarm_time = alarm_unix_time; rtc_writel(rtc, TOP, rtc->alarm_time); if (alrm->enabled) rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | RTC_BIT(CTRL_TOPEN)); else rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) & ~RTC_BIT(CTRL_TOPEN)); spin_unlock_irq(&rtc->lock); return ret; } static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); int ret = 0; spin_lock_irq(&rtc->lock); if(enabled) { if (rtc_readl(rtc, VAL) > rtc->alarm_time) { ret = -EINVAL; goto out; } rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | RTC_BIT(CTRL_TOPEN)); rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); } else { rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) & ~RTC_BIT(CTRL_TOPEN)); 
rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); } out: spin_unlock_irq(&rtc->lock); return ret; } static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id) { struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id; unsigned long isr = rtc_readl(rtc, ISR); unsigned long events = 0; int ret = IRQ_NONE; spin_lock(&rtc->lock); if (isr & RTC_BIT(ISR_TOPI)) { rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) & ~RTC_BIT(CTRL_TOPEN)); rtc_writel(rtc, VAL, rtc->alarm_time); events = RTC_AF | RTC_IRQF; rtc_update_irq(rtc->rtc, 1, events); ret = IRQ_HANDLED; } spin_unlock(&rtc->lock); return ret; } static struct rtc_class_ops at32_rtc_ops = { .read_time = at32_rtc_readtime, .set_time = at32_rtc_settime, .read_alarm = at32_rtc_readalarm, .set_alarm = at32_rtc_setalarm, .alarm_irq_enable = at32_rtc_alarm_irq_enable, }; static int __init at32_rtc_probe(struct platform_device *pdev) { struct resource *regs; struct rtc_at32ap700x *rtc; int irq; int ret; rtc = kzalloc(sizeof(struct rtc_at32ap700x), GFP_KERNEL); if (!rtc) { dev_dbg(&pdev->dev, "out of memory\n"); return -ENOMEM; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_dbg(&pdev->dev, "no mmio resource defined\n"); ret = -ENXIO; goto out; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_dbg(&pdev->dev, "could not get irq\n"); ret = -ENXIO; goto out; } rtc->irq = irq; rtc->regs = ioremap(regs->start, resource_size(regs)); if (!rtc->regs) { ret = -ENOMEM; dev_dbg(&pdev->dev, "could not map I/O memory\n"); goto out; } spin_lock_init(&rtc->lock); /* * Maybe init RTC: count from zero at 1 Hz, disable wrap irq. * * Do not reset VAL register, as it can hold an old time * from last JTAG reset. 
*/ if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) { rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR)); rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe) | RTC_BIT(CTRL_EN)); } ret = request_irq(irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc); if (ret) { dev_dbg(&pdev->dev, "could not request irq %d\n", irq); goto out_iounmap; } platform_set_drvdata(pdev, rtc); rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &at32_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc)) { dev_dbg(&pdev->dev, "could not register rtc device\n"); ret = PTR_ERR(rtc->rtc); goto out_free_irq; } device_init_wakeup(&pdev->dev, 1); dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n", (unsigned long)rtc->regs, rtc->irq); return 0; out_free_irq: platform_set_drvdata(pdev, NULL); free_irq(irq, rtc); out_iounmap: iounmap(rtc->regs); out: kfree(rtc); return ret; } static int __exit at32_rtc_remove(struct platform_device *pdev) { struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev); device_init_wakeup(&pdev->dev, 0); free_irq(rtc->irq, rtc); iounmap(rtc->regs); rtc_device_unregister(rtc->rtc); kfree(rtc); platform_set_drvdata(pdev, NULL); return 0; } MODULE_ALIAS("platform:at32ap700x_rtc"); static struct platform_driver at32_rtc_driver = { .remove = __exit_p(at32_rtc_remove), .driver = { .name = "at32ap700x_rtc", .owner = THIS_MODULE, }, }; static int __init at32_rtc_init(void) { return platform_driver_probe(&at32_rtc_driver, at32_rtc_probe); } module_init(at32_rtc_init); static void __exit at32_rtc_exit(void) { platform_driver_unregister(&at32_rtc_driver); } module_exit(at32_rtc_exit); MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>"); MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x"); MODULE_LICENSE("GPL");
gpl-2.0
sparkma/kernel
drivers/misc/sgi-gru/grufile.c
8106
15280
/* * SN Platform GRU Driver * * FILE OPERATIONS & DRIVER INITIALIZATION * * This file supports the user system call for file open, close, mmap, etc. * This also incudes the driver initialization code. * * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #ifdef CONFIG_X86_64 #include <asm/uv/uv_irq.h> #endif #include <asm/uv/uv.h> #include "gru.h" #include "grulib.h" #include "grutables.h" #include <asm/uv/uv_hub.h> #include <asm/uv/uv_mmrs.h> struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; unsigned long gru_start_paddr __read_mostly; void *gru_start_vaddr __read_mostly; unsigned long gru_end_paddr __read_mostly; unsigned int gru_max_gids __read_mostly; struct gru_stats_s gru_stats; /* Guaranteed user available resources on each node */ static int max_user_cbrs, max_user_dsr_bytes; static struct miscdevice gru_miscdev; /* * gru_vma_close * * Called when unmapping a device mapping. Frees all gru resources * and tables belonging to the vma. 
*/ static void gru_vma_close(struct vm_area_struct *vma) { struct gru_vma_data *vdata; struct gru_thread_state *gts; struct list_head *entry, *next; if (!vma->vm_private_data) return; vdata = vma->vm_private_data; vma->vm_private_data = NULL; gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file, vdata); list_for_each_safe(entry, next, &vdata->vd_head) { gts = list_entry(entry, struct gru_thread_state, ts_next); list_del(&gts->ts_next); mutex_lock(&gts->ts_ctxlock); if (gts->ts_gru) gru_unload_context(gts, 0); mutex_unlock(&gts->ts_ctxlock); gts_drop(gts); } kfree(vdata); STAT(vdata_free); } /* * gru_file_mmap * * Called when mmapping the device. Initializes the vma with a fault handler * and private data structure necessary to allocate, track, and free the * underlying pages. */ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) { if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE)) return -EPERM; if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) return -EINVAL; vma->vm_flags |= (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP | VM_RESERVED); vma->vm_page_prot = PAGE_SHARED; vma->vm_ops = &gru_vm_ops; vma->vm_private_data = gru_alloc_vma_data(vma, 0); if (!vma->vm_private_data) return -ENOMEM; gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n", file, vma->vm_start, vma, vma->vm_private_data); return 0; } /* * Create a new GRU context */ static int gru_create_new_context(unsigned long arg) { struct gru_create_context_req req; struct vm_area_struct *vma; struct gru_vma_data *vdata; int ret = -EINVAL; if (copy_from_user(&req, (void __user *)arg, sizeof(req))) return -EFAULT; if (req.data_segment_bytes > max_user_dsr_bytes) return -EINVAL; if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count) return -EINVAL; if (!(req.options & GRU_OPT_MISS_MASK)) req.options |= GRU_OPT_MISS_FMM_INTR; down_write(&current->mm->mmap_sem); vma = 
gru_find_vma(req.gseg); if (vma) { vdata = vma->vm_private_data; vdata->vd_user_options = req.options; vdata->vd_dsr_au_count = GRU_DS_BYTES_TO_AU(req.data_segment_bytes); vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks); vdata->vd_tlb_preload_count = req.tlb_preload_count; ret = 0; } up_write(&current->mm->mmap_sem); return ret; } /* * Get GRU configuration info (temp - for emulator testing) */ static long gru_get_config_info(unsigned long arg) { struct gru_config_info info; int nodesperblade; if (num_online_nodes() > 1 && (uv_node_to_blade_id(1) == uv_node_to_blade_id(0))) nodesperblade = 2; else nodesperblade = 1; info.cpus = num_online_cpus(); info.nodes = num_online_nodes(); info.blades = info.nodes / nodesperblade; info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades; if (copy_to_user((void __user *)arg, &info, sizeof(info))) return -EFAULT; return 0; } /* * gru_file_unlocked_ioctl * * Called to update file attributes via IOCTL calls. */ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req, unsigned long arg) { int err = -EBADRQC; gru_dbg(grudev, "file %p, req 0x%x, 0x%lx\n", file, req, arg); switch (req) { case GRU_CREATE_CONTEXT: err = gru_create_new_context(arg); break; case GRU_SET_CONTEXT_OPTION: err = gru_set_context_option(arg); break; case GRU_USER_GET_EXCEPTION_DETAIL: err = gru_get_exception_detail(arg); break; case GRU_USER_UNLOAD_CONTEXT: err = gru_user_unload_context(arg); break; case GRU_USER_FLUSH_TLB: err = gru_user_flush_tlb(arg); break; case GRU_USER_CALL_OS: err = gru_handle_user_call_os(arg); break; case GRU_GET_GSEG_STATISTICS: err = gru_get_gseg_statistics(arg); break; case GRU_KTEST: err = gru_ktest(arg); break; case GRU_GET_CONFIG_INFO: err = gru_get_config_info(arg); break; case GRU_DUMP_CHIPLET_STATE: err = gru_dump_chiplet_request(arg); break; } return err; } /* * Called at init time to build tables for all GRUs that are present in the * system. 
*/ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr, void *vaddr, int blade_id, int chiplet_id) { spin_lock_init(&gru->gs_lock); spin_lock_init(&gru->gs_asid_lock); gru->gs_gru_base_paddr = paddr; gru->gs_gru_base_vaddr = vaddr; gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id; gru->gs_blade = gru_base[blade_id]; gru->gs_blade_id = blade_id; gru->gs_chiplet_id = chiplet_id; gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; gru->gs_asid_limit = MAX_ASID; gru_tgh_flush_init(gru); if (gru->gs_gid >= gru_max_gids) gru_max_gids = gru->gs_gid + 1; gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n", blade_id, gru->gs_gid, gru->gs_gru_base_vaddr, gru->gs_gru_base_paddr); } static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) { int pnode, nid, bid, chip; int cbrs, dsrbytes, n; int order = get_order(sizeof(struct gru_blade_state)); struct page *page; struct gru_state *gru; unsigned long paddr; void *vaddr; max_user_cbrs = GRU_NUM_CB; max_user_dsr_bytes = GRU_NUM_DSR_BYTES; for_each_possible_blade(bid) { pnode = uv_blade_to_pnode(bid); nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */ page = alloc_pages_node(nid, GFP_KERNEL, order); if (!page) goto fail; gru_base[bid] = page_address(page); memset(gru_base[bid], 0, sizeof(struct gru_blade_state)); gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0]; spin_lock_init(&gru_base[bid]->bs_lock); init_rwsem(&gru_base[bid]->bs_kgts_sema); dsrbytes = 0; cbrs = 0; for (gru = gru_base[bid]->bs_grus, chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++, gru++) { paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); gru_init_chiplet(gru, paddr, vaddr, bid, chip); n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; cbrs = max(cbrs, n); n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; dsrbytes = max(dsrbytes, n); } max_user_cbrs = min(max_user_cbrs, 
cbrs); max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes); } return 0; fail: for (bid--; bid >= 0; bid--) free_pages((unsigned long)gru_base[bid], order); return -ENOMEM; } static void gru_free_tables(void) { int bid; int order = get_order(sizeof(struct gru_state) * GRU_CHIPLETS_PER_BLADE); for (bid = 0; bid < GRU_MAX_BLADES; bid++) free_pages((unsigned long)gru_base[bid], order); } static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) { unsigned long mmr = 0; int core; /* * We target the cores of a blade and not the hyperthreads themselves. * There is a max of 8 cores per socket and 2 sockets per blade, * making for a max total of 16 cores (i.e., 16 CPUs without * hyperthreading and 32 CPUs with hyperthreading). */ core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu); if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu)) return 0; if (chiplet == 0) { mmr = UVH_GR0_TLB_INT0_CONFIG + core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG); } else if (chiplet == 1) { mmr = UVH_GR1_TLB_INT0_CONFIG + core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG); } else { BUG(); } *corep = core; return mmr; } #ifdef CONFIG_IA64 static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; static void gru_noop(struct irq_data *d) { } static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { [0 ... 
GRU_CHIPLETS_PER_BLADE - 1] { .irq_mask = gru_noop, .irq_unmask = gru_noop, .irq_ack = gru_noop } }; static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, irq_handler_t irq_handler, int cpu, int blade) { unsigned long mmr; int irq = IRQ_GRU + chiplet; int ret, core; mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); if (mmr == 0) return 0; if (gru_irq_count[chiplet] == 0) { gru_chip[chiplet].name = irq_name; ret = irq_set_chip(irq, &gru_chip[chiplet]); if (ret) { printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n", GRU_DRIVER_ID_STR, -ret); return ret; } ret = request_irq(irq, irq_handler, 0, irq_name, NULL); if (ret) { printk(KERN_ERR "%s: request_irq failed, errno=%d\n", GRU_DRIVER_ID_STR, -ret); return ret; } } gru_irq_count[chiplet]++; return 0; } static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) { unsigned long mmr; int core, irq = IRQ_GRU + chiplet; if (gru_irq_count[chiplet] == 0) return; mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); if (mmr == 0) return; if (--gru_irq_count[chiplet] == 0) free_irq(irq, NULL); } #elif defined CONFIG_X86_64 static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name, irq_handler_t irq_handler, int cpu, int blade) { unsigned long mmr; int irq, core; int ret; mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); if (mmr == 0) return 0; irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU); if (irq < 0) { printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n", GRU_DRIVER_ID_STR, -irq); return irq; } ret = request_irq(irq, irq_handler, 0, irq_name, NULL); if (ret) { uv_teardown_irq(irq); printk(KERN_ERR "%s: request_irq failed, errno=%d\n", GRU_DRIVER_ID_STR, -ret); return ret; } gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq; return 0; } static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade) { int irq, core; unsigned long mmr; mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core); if (mmr) { irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core]; if (irq) { 
free_irq(irq, NULL); uv_teardown_irq(irq); } } } #endif static void gru_teardown_tlb_irqs(void) { int blade; int cpu; for_each_online_cpu(cpu) { blade = uv_cpu_to_blade_id(cpu); gru_chiplet_teardown_tlb_irq(0, cpu, blade); gru_chiplet_teardown_tlb_irq(1, cpu, blade); } for_each_possible_blade(blade) { if (uv_blade_nr_possible_cpus(blade)) continue; gru_chiplet_teardown_tlb_irq(0, 0, blade); gru_chiplet_teardown_tlb_irq(1, 0, blade); } } static int gru_setup_tlb_irqs(void) { int blade; int cpu; int ret; for_each_online_cpu(cpu) { blade = uv_cpu_to_blade_id(cpu); ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade); if (ret != 0) goto exit1; ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade); if (ret != 0) goto exit1; } for_each_possible_blade(blade) { if (uv_blade_nr_possible_cpus(blade)) continue; ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade); if (ret != 0) goto exit1; ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade); if (ret != 0) goto exit1; } return 0; exit1: gru_teardown_tlb_irqs(); return ret; } /* * gru_init * * Called at boot or module load time to initialize the GRUs. 
*/ static int __init gru_init(void) { int ret; if (!is_uv_system()) return 0; #if defined CONFIG_IA64 gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ #else gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & 0x7fffffffffffUL; #endif gru_start_vaddr = __va(gru_start_paddr); gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", gru_start_paddr, gru_end_paddr); ret = misc_register(&gru_miscdev); if (ret) { printk(KERN_ERR "%s: misc_register failed\n", GRU_DRIVER_ID_STR); goto exit0; } ret = gru_proc_init(); if (ret) { printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); goto exit1; } ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); if (ret) { printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); goto exit2; } ret = gru_setup_tlb_irqs(); if (ret != 0) goto exit3; gru_kservices_init(); printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, GRU_DRIVER_VERSION_STR); return 0; exit3: gru_free_tables(); exit2: gru_proc_exit(); exit1: misc_deregister(&gru_miscdev); exit0: return ret; } static void __exit gru_exit(void) { if (!is_uv_system()) return; gru_teardown_tlb_irqs(); gru_kservices_exit(); gru_free_tables(); misc_deregister(&gru_miscdev); gru_proc_exit(); } static const struct file_operations gru_fops = { .owner = THIS_MODULE, .unlocked_ioctl = gru_file_unlocked_ioctl, .mmap = gru_file_mmap, .llseek = noop_llseek, }; static struct miscdevice gru_miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "gru", .fops = &gru_fops, }; const struct vm_operations_struct gru_vm_ops = { .close = gru_vma_close, .fault = gru_fault, }; #ifndef MODULE fs_initcall(gru_init); #else module_init(gru_init); #endif module_exit(gru_exit); module_param(gru_options, ulong, 0644); MODULE_PARM_DESC(gru_options, "Various debug options"); MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR); 
MODULE_VERSION(GRU_DRIVER_VERSION_STR);
gpl-2.0
TeamRegular/android_kernel_nvidia_shieldtablet
arch/mips/powertv/time.c
10666
1134
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 * Portions copyright (C) 2009 Cisco Systems, Inc.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Setting up the clock on the MIPS boards.
 */
#include <linux/init.h>

#include <asm/mach-powertv/interrupts.h>
#include <asm/time.h>

#include "powertv-clock.h"

/* Return the interrupt number used by the CP0 compare (timer) interrupt. */
unsigned int __cpuinit get_c0_compare_int(void)
{
	return irq_mips_timer;
}

/* Platform time init hook: bring up the PowerTV clocksource. */
void __init plat_time_init(void)
{
	powertv_clocksource_init();
}
gpl-2.0
MoKee/android_kernel_htc_dlx
arch/m32r/mm/ioremap-nommu.c
13994
1282
/* * linux/arch/m32r/mm/ioremap-nommu.c * * Copyright (c) 2001, 2002 Hiroyuki Kondo * * Taken from mips version. * (C) Copyright 1995 1996 Linus Torvalds * (C) Copyright 2001 Ralf Baechle */ /* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * */ #include <linux/module.h> #include <asm/addrspace.h> #include <asm/byteorder.h> #include <linux/vmalloc.h> #include <asm/io.h> #include <asm/pgalloc.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ #define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL)) void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { return (void *)phys_addr; } #define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1) void iounmap(volatile void __iomem *addr) { }
gpl-2.0
ystk/sched-deadline
arch/m32r/mm/ioremap-nommu.c
13994
1282
/* * linux/arch/m32r/mm/ioremap-nommu.c * * Copyright (c) 2001, 2002 Hiroyuki Kondo * * Taken from mips version. * (C) Copyright 1995 1996 Linus Torvalds * (C) Copyright 2001 Ralf Baechle */ /* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * */ #include <linux/module.h> #include <asm/addrspace.h> #include <asm/byteorder.h> #include <linux/vmalloc.h> #include <asm/io.h> #include <asm/pgalloc.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> /* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ #define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL)) void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { return (void *)phys_addr; } #define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1) void iounmap(volatile void __iomem *addr) { }
gpl-2.0
gohai/linux-vc4
arch/frv/lib/checksum.c
13994
4201
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IP/TCP/UDP checksumming routines * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Tom May, <ftom@netcom.com> * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> * Lots of code moved from tcp.c and ip.c; see those files * for more names. * * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: * Fixed some nasty bugs, causing some horrible crashes. * A: At some points, the sum (%0) was used as * length-counter instead of the length counter * (%1). Thanks to Roman Hodek for pointing this out. * B: GCC seems to mess up if one uses too many * data-registers to hold input values and one tries to * specify d0 and d1 as scratch registers. Letting gcc choose these * registers itself solves the problem. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most of the assembly has to go. */ #include <net/checksum.h> #include <linux/module.h> static inline unsigned short from32to16(unsigned long x) { /* add up 16-bit and 16-bit for 16+c bit */ x = (x & 0xffff) + (x >> 16); /* add up carry.. */ x = (x & 0xffff) + (x >> 16); return x; } static unsigned long do_csum(const unsigned char * buff, int len) { int odd, count; unsigned long result = 0; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { result = *buff; len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(unsigned short *) buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. 
*/ if (count) { unsigned long carry = 0; do { unsigned long w = *(unsigned long *) buff; count--; buff += 4; result += carry; result += w; carry = (w > result); } while (count); result += carry; result = (result & 0xffff) + (result >> 16); } if (len & 2) { result += *(unsigned short *) buff; buff += 2; } } if (len & 1) result += (*buff << 8); result = from32to16(result); if (odd) result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); out: return result; } /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * returns a 32-bit number suitable for feeding into itself * or csum_tcpudp_magic * * this function must be called with even lengths, except * for the last fragment, which may be odd * * it's best to have buff aligned on a 32-bit boundary */ __wsum csum_partial(const void *buff, int len, __wsum sum) { unsigned int result = do_csum(buff, len); /* add in old sum, and carry.. */ result += (__force u32)sum; if ((__force u32)sum > result) result += 1; return (__force __wsum)result; } EXPORT_SYMBOL(csum_partial); /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ __sum16 ip_compute_csum(const void *buff, int len) { return (__force __sum16)~do_csum(buff, len); } EXPORT_SYMBOL(ip_compute_csum); /* * copy from fs while checksumming, otherwise like csum_partial */ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *csum_err) { int rem; if (csum_err) *csum_err = 0; rem = copy_from_user(dst, src, len); if (rem != 0) { if (csum_err) *csum_err = -EFAULT; memset(dst + len - rem, 0, rem); len = rem; } return csum_partial(dst, len, sum); } EXPORT_SYMBOL(csum_partial_copy_from_user); /* * copy from ds while checksumming, otherwise like csum_partial */ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { memcpy(dst, src, len); return csum_partial(dst, len, sum); } EXPORT_SYMBOL(csum_partial_copy_nocheck);
gpl-2.0
sancome/linux-3.x
arch/arm/mach-pxa/tavorevb.c
171
11371
/* * linux/arch/arm/mach-pxa/tavorevb.c * * Support for the Marvell PXA930 Evaluation Board * * Copyright (C) 2007-2008 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * publishhed by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/smc91x.h> #include <linux/pwm_backlight.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/pxa930.h> #include <mach/pxafb.h> #include <plat/pxa27x_keypad.h> #include "devices.h" #include "generic.h" /* Tavor EVB MFP configurations */ static mfp_cfg_t tavorevb_mfp_cfg[] __initdata = { /* Ethernet */ DF_nCS1_nCS3, GPIO47_GPIO, /* LCD */ GPIO23_LCD_DD0, GPIO24_LCD_DD1, GPIO25_LCD_DD2, GPIO26_LCD_DD3, GPIO27_LCD_DD4, GPIO28_LCD_DD5, GPIO29_LCD_DD6, GPIO44_LCD_DD7, GPIO21_LCD_CS, GPIO22_LCD_CS2, GPIO17_LCD_FCLK_RD, GPIO18_LCD_LCLK_A0, GPIO19_LCD_PCLK_WR, /* LCD Backlight */ GPIO43_PWM3, /* primary backlight */ GPIO32_PWM0, /* secondary backlight */ /* Keypad */ GPIO0_KP_MKIN_0, GPIO2_KP_MKIN_1, GPIO4_KP_MKIN_2, GPIO6_KP_MKIN_3, GPIO8_KP_MKIN_4, GPIO10_KP_MKIN_5, GPIO12_KP_MKIN_6, GPIO1_KP_MKOUT_0, GPIO3_KP_MKOUT_1, GPIO5_KP_MKOUT_2, GPIO7_KP_MKOUT_3, GPIO9_KP_MKOUT_4, GPIO11_KP_MKOUT_5, GPIO13_KP_MKOUT_6, GPIO14_KP_DKIN_2, GPIO15_KP_DKIN_3, }; #define TAVOREVB_ETH_PHYS (0x14000000) static struct resource smc91x_resources[] = { [0] = { .start = (TAVOREVB_ETH_PHYS + 0x300), .end = (TAVOREVB_ETH_PHYS + 0xfffff), .flags = IORESOURCE_MEM, }, [1] = { .start = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)), .end = gpio_to_irq(mfp_to_gpio(MFP_PIN_GPIO47)), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct smc91x_platdata tavorevb_smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, }; 
static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &tavorevb_smc91x_info, }, }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) static unsigned int tavorevb_matrix_key_map[] = { /* KEY(row, col, key_code) */ KEY(0, 4, KEY_A), KEY(0, 5, KEY_B), KEY(0, 6, KEY_C), KEY(1, 4, KEY_E), KEY(1, 5, KEY_F), KEY(1, 6, KEY_G), KEY(2, 4, KEY_I), KEY(2, 5, KEY_J), KEY(2, 6, KEY_K), KEY(3, 4, KEY_M), KEY(3, 5, KEY_N), KEY(3, 6, KEY_O), KEY(4, 5, KEY_R), KEY(4, 6, KEY_S), KEY(5, 4, KEY_U), KEY(5, 4, KEY_V), KEY(5, 6, KEY_W), KEY(6, 4, KEY_Y), KEY(6, 5, KEY_Z), KEY(0, 3, KEY_0), KEY(2, 0, KEY_1), KEY(2, 1, KEY_2), KEY(2, 2, KEY_3), KEY(2, 3, KEY_4), KEY(1, 0, KEY_5), KEY(1, 1, KEY_6), KEY(1, 2, KEY_7), KEY(1, 3, KEY_8), KEY(0, 2, KEY_9), KEY(6, 6, KEY_SPACE), KEY(0, 0, KEY_KPASTERISK), /* * */ KEY(0, 1, KEY_KPDOT), /* # */ KEY(4, 1, KEY_UP), KEY(4, 3, KEY_DOWN), KEY(4, 0, KEY_LEFT), KEY(4, 2, KEY_RIGHT), KEY(6, 0, KEY_HOME), KEY(3, 2, KEY_END), KEY(6, 1, KEY_DELETE), KEY(5, 2, KEY_BACK), KEY(6, 3, KEY_CAPSLOCK), /* KEY_LEFTSHIFT), */ KEY(4, 4, KEY_ENTER), /* scroll push */ KEY(6, 2, KEY_ENTER), /* keypad action */ KEY(3, 1, KEY_SEND), KEY(5, 3, KEY_RECORD), KEY(5, 0, KEY_VOLUMEUP), KEY(5, 1, KEY_VOLUMEDOWN), KEY(3, 0, KEY_F22), /* soft1 */ KEY(3, 3, KEY_F23), /* soft2 */ }; static struct pxa27x_keypad_platform_data tavorevb_keypad_info = { .matrix_key_rows = 7, .matrix_key_cols = 7, .matrix_key_map = tavorevb_matrix_key_map, .matrix_key_map_size = ARRAY_SIZE(tavorevb_matrix_key_map), .debounce_interval = 30, }; static void __init tavorevb_init_keypad(void) { pxa_set_keypad_info(&tavorevb_keypad_info); } #else static inline void tavorevb_init_keypad(void) {} #endif /* CONFIG_KEYBOARD_PXA27x || CONFIG_KEYBOARD_PXA27x_MODULE */ #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct platform_pwm_backlight_data 
tavorevb_backlight_data[] = { [0] = { /* primary backlight */ .pwm_id = 2, .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 100000, }, [1] = { /* secondary backlight */ .pwm_id = 0, .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 100000, }, }; static struct platform_device tavorevb_backlight_devices[] = { [0] = { .name = "pwm-backlight", .id = 0, .dev = { .platform_data = &tavorevb_backlight_data[0], }, }, [1] = { .name = "pwm-backlight", .id = 1, .dev = { .platform_data = &tavorevb_backlight_data[1], }, }, }; static uint16_t panel_init[] = { /* DSTB OUT */ SMART_CMD(0x00), SMART_CMD_NOOP, SMART_DELAY(1), SMART_CMD(0x00), SMART_CMD_NOOP, SMART_DELAY(1), SMART_CMD(0x00), SMART_CMD_NOOP, SMART_DELAY(1), /* STB OUT */ SMART_CMD(0x00), SMART_CMD(0x1D), SMART_DAT(0x00), SMART_DAT(0x05), SMART_DELAY(1), /* P-ON Init sequence */ SMART_CMD(0x00), /* OSC ON */ SMART_CMD(0x00), SMART_DAT(0x00), SMART_DAT(0x01), SMART_CMD(0x00), SMART_CMD(0x01), /* SOURCE DRIVER SHIFT DIRECTION and display RAM setting */ SMART_DAT(0x01), SMART_DAT(0x27), SMART_CMD(0x00), SMART_CMD(0x02), /* LINE INV */ SMART_DAT(0x02), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x03), /* IF mode(1) */ SMART_DAT(0x01), /* 8bit smart mode(8-8),high speed write mode */ SMART_DAT(0x30), SMART_CMD(0x07), SMART_CMD(0x00), /* RAM Write Mode */ SMART_DAT(0x00), SMART_DAT(0x03), SMART_CMD(0x00), /* DISPLAY Setting, 262K, fixed(NO scroll), no split screen */ SMART_CMD(0x07), SMART_DAT(0x40), /* 16/18/19 BPP */ SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x08), /* BP, FP Seting, BP=2H, FP=3H */ SMART_DAT(0x03), SMART_DAT(0x02), SMART_CMD(0x00), SMART_CMD(0x0C), /* IF mode(2), using internal clock & MPU */ SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x0D), /* Frame setting, 1Min. 
Frequence, 16CLK */ SMART_DAT(0x00), SMART_DAT(0x10), SMART_CMD(0x00), SMART_CMD(0x12), /* Timing(1),ASW W=4CLK, ASW ST=1CLK */ SMART_DAT(0x03), SMART_DAT(0x02), SMART_CMD(0x00), SMART_CMD(0x13), /* Timing(2),OEV ST=0.5CLK, OEV ED=1CLK */ SMART_DAT(0x01), SMART_DAT(0x02), SMART_CMD(0x00), SMART_CMD(0x14), /* Timing(3), ASW HOLD=0.5CLK */ SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x15), /* Timing(4), CKV ST=0CLK, CKV ED=1CLK */ SMART_DAT(0x20), SMART_DAT(0x00), SMART_CMD(0x00), SMART_CMD(0x1C), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x03), SMART_CMD(0x00), SMART_DAT(0x04), SMART_DAT(0x03), SMART_CMD(0x03), SMART_CMD(0x01), SMART_DAT(0x03), SMART_DAT(0x04), SMART_CMD(0x03), SMART_CMD(0x02), SMART_DAT(0x04), SMART_DAT(0x03), SMART_CMD(0x03), SMART_CMD(0x03), SMART_DAT(0x03), SMART_DAT(0x03), SMART_CMD(0x03), SMART_CMD(0x04), SMART_DAT(0x01), SMART_DAT(0x01), SMART_CMD(0x03), SMART_CMD(0x05), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x04), SMART_CMD(0x02), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x04), SMART_CMD(0x03), SMART_DAT(0x01), SMART_DAT(0x3F), SMART_DELAY(0), /* DISP RAM setting: 240*320 */ SMART_CMD(0x04), /* HADDR, START 0 */ SMART_CMD(0x06), SMART_DAT(0x00), SMART_DAT(0x00), /* x1,3 */ SMART_CMD(0x04), /* HADDR, END 4 */ SMART_CMD(0x07), SMART_DAT(0x00), SMART_DAT(0xEF), /* x2, 7 */ SMART_CMD(0x04), /* VADDR, START 8 */ SMART_CMD(0x08), SMART_DAT(0x00), /* y1, 10 */ SMART_DAT(0x00), /* y1, 11 */ SMART_CMD(0x04), /* VADDR, END 12 */ SMART_CMD(0x09), SMART_DAT(0x01), /* y2, 14 */ SMART_DAT(0x3F), /* y2, 15 */ SMART_CMD(0x02), /* RAM ADDR SETTING 16 */ SMART_CMD(0x00), SMART_DAT(0x00), SMART_DAT(0x00), /* x1, 19 */ SMART_CMD(0x02), /* RAM ADDR SETTING 20 */ SMART_CMD(0x01), SMART_DAT(0x00), /* y1, 22 */ SMART_DAT(0x00), /* y1, 23 */ }; static uint16_t panel_on[] = { /* Power-IC ON */ SMART_CMD(0x01), SMART_CMD(0x02), SMART_DAT(0x07), SMART_DAT(0x7D), SMART_CMD(0x01), SMART_CMD(0x03), SMART_DAT(0x00), SMART_DAT(0x05), 
SMART_CMD(0x01), SMART_CMD(0x04), SMART_DAT(0x00), SMART_DAT(0x00), SMART_CMD(0x01), SMART_CMD(0x05), SMART_DAT(0x00), SMART_DAT(0x15), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xC0), SMART_DAT(0x10), SMART_DELAY(30), /* DISP ON */ SMART_CMD(0x01), SMART_CMD(0x01), SMART_DAT(0x00), SMART_DAT(0x01), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xFF), SMART_DAT(0xFE), SMART_DELAY(150), }; static uint16_t panel_off[] = { SMART_CMD(0x00), SMART_CMD(0x1E), SMART_DAT(0x00), SMART_DAT(0x0A), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xFF), SMART_DAT(0xEE), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xF8), SMART_DAT(0x12), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xE8), SMART_DAT(0x11), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0xC0), SMART_DAT(0x11), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0x40), SMART_DAT(0x11), SMART_CMD(0x01), SMART_CMD(0x00), SMART_DAT(0x00), SMART_DAT(0x10), }; static uint16_t update_framedata[] = { /* write ram */ SMART_CMD(0x02), SMART_CMD(0x02), /* write frame data */ SMART_CMD_WRITE_FRAME, }; static void ltm020d550_lcd_power(int on, struct fb_var_screeninfo *var) { struct fb_info *info = container_of(var, struct fb_info, var); if (on) { pxafb_smart_queue(info, ARRAY_AND_SIZE(panel_init)); pxafb_smart_queue(info, ARRAY_AND_SIZE(panel_on)); } else { pxafb_smart_queue(info, ARRAY_AND_SIZE(panel_off)); } if (pxafb_smart_flush(info)) pr_err("%s: timed out\n", __func__); } static void ltm020d550_update(struct fb_info *info) { pxafb_smart_queue(info, ARRAY_AND_SIZE(update_framedata)); pxafb_smart_flush(info); } static struct pxafb_mode_info toshiba_ltm020d550_modes[] = { [0] = { .xres = 240, .yres = 320, .bpp = 16, .a0csrd_set_hld = 30, .a0cswr_set_hld = 30, .wr_pulse_width = 30, .rd_pulse_width = 170, .op_hold_time = 30, .cmd_inh_time = 60, /* L_LCLK_A0 and L_LCLK_RD active low */ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }, }; static struct pxafb_mach_info tavorevb_lcd_info = { .modes = toshiba_ltm020d550_modes, .num_modes = 
1, .lcd_conn = LCD_SMART_PANEL_8BPP | LCD_PCLK_EDGE_FALL, .pxafb_lcd_power = ltm020d550_lcd_power, .smart_update = ltm020d550_update, }; static void __init tavorevb_init_lcd(void) { platform_device_register(&tavorevb_backlight_devices[0]); platform_device_register(&tavorevb_backlight_devices[1]); pxa_set_fb_info(NULL, &tavorevb_lcd_info); } #else static inline void tavorevb_init_lcd(void) {} #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */ static void __init tavorevb_init(void) { /* initialize MFP configurations */ pxa3xx_mfp_config(ARRAY_AND_SIZE(tavorevb_mfp_cfg)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); platform_device_register(&smc91x_device); tavorevb_init_lcd(); tavorevb_init_keypad(); } MACHINE_START(TAVOREVB, "PXA930 Evaluation Board (aka TavorEVB)") /* Maintainer: Eric Miao <eric.miao@marvell.com> */ .atag_offset = 0x100, .map_io = pxa3xx_map_io, .init_irq = pxa3xx_init_irq, .handle_irq = pxa3xx_handle_irq, .timer = &pxa_timer, .init_machine = tavorevb_init, MACHINE_END
gpl-2.0
sankar-p/opt-hotplug
drivers/iio/magnetometer/mag3110.c
683
10639
/* * mag3110.c - Support for Freescale MAG3110 magnetometer sensor * * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net> * * This file is subject to the terms and conditions of version 2 of * the GNU General Public License. See the file COPYING in the main * directory of this archive for more details. * * (7-bit I2C slave address 0x0e) * * TODO: irq, user offset, oversampling, continuous mode */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/buffer.h> #include <linux/iio/triggered_buffer.h> #include <linux/delay.h> #define MAG3110_STATUS 0x00 #define MAG3110_OUT_X 0x01 /* MSB first */ #define MAG3110_OUT_Y 0x03 #define MAG3110_OUT_Z 0x05 #define MAG3110_WHO_AM_I 0x07 #define MAG3110_OFF_X 0x09 /* MSB first */ #define MAG3110_OFF_Y 0x0b #define MAG3110_OFF_Z 0x0d #define MAG3110_DIE_TEMP 0x0f #define MAG3110_CTRL_REG1 0x10 #define MAG3110_CTRL_REG2 0x11 #define MAG3110_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0)) #define MAG3110_CTRL_DR_MASK (BIT(7) | BIT(6) | BIT(5)) #define MAG3110_CTRL_DR_SHIFT 5 #define MAG3110_CTRL_DR_DEFAULT 0 #define MAG3110_CTRL_TM BIT(1) /* trigger single measurement */ #define MAG3110_CTRL_AC BIT(0) /* continuous measurements */ #define MAG3110_CTRL_AUTO_MRST_EN BIT(7) /* magnetic auto-reset */ #define MAG3110_CTRL_RAW BIT(5) /* measurements not user-offset corrected */ #define MAG3110_DEVICE_ID 0xc4 /* Each client has this additional data */ struct mag3110_data { struct i2c_client *client; struct mutex lock; u8 ctrl_reg1; }; static int mag3110_request(struct mag3110_data *data) { int ret, tries = 150; /* trigger measurement */ ret = i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1, data->ctrl_reg1 | MAG3110_CTRL_TM); if (ret < 0) return ret; while (tries-- > 0) { ret = i2c_smbus_read_byte_data(data->client, MAG3110_STATUS); if (ret < 0) return ret; /* wait for data ready */ if ((ret & MAG3110_STATUS_DRDY) == 
MAG3110_STATUS_DRDY) break; msleep(20); } if (tries < 0) { dev_err(&data->client->dev, "data not ready\n"); return -EIO; } return 0; } static int mag3110_read(struct mag3110_data *data, __be16 buf[3]) { int ret; mutex_lock(&data->lock); ret = mag3110_request(data); if (ret < 0) { mutex_unlock(&data->lock); return ret; } ret = i2c_smbus_read_i2c_block_data(data->client, MAG3110_OUT_X, 3 * sizeof(__be16), (u8 *) buf); mutex_unlock(&data->lock); return ret; } static ssize_t mag3110_show_int_plus_micros(char *buf, const int (*vals)[2], int n) { size_t len = 0; while (n-- > 0) len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ", vals[n][0], vals[n][1]); /* replace trailing space by newline */ buf[len - 1] = '\n'; return len; } static int mag3110_get_int_plus_micros_index(const int (*vals)[2], int n, int val, int val2) { while (n-- > 0) if (val == vals[n][0] && val2 == vals[n][1]) return n; return -EINVAL; } static const int mag3110_samp_freq[8][2] = { {80, 0}, {40, 0}, {20, 0}, {10, 0}, {5, 0}, {2, 500000}, {1, 250000}, {0, 625000} }; static ssize_t mag3110_show_samp_freq_avail(struct device *dev, struct device_attribute *attr, char *buf) { return mag3110_show_int_plus_micros(buf, mag3110_samp_freq, 8); } static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mag3110_show_samp_freq_avail); static int mag3110_get_samp_freq_index(struct mag3110_data *data, int val, int val2) { return mag3110_get_int_plus_micros_index(mag3110_samp_freq, 8, val, val2); } static int mag3110_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct mag3110_data *data = iio_priv(indio_dev); __be16 buffer[3]; int i, ret; switch (mask) { case IIO_CHAN_INFO_RAW: if (iio_buffer_enabled(indio_dev)) return -EBUSY; switch (chan->type) { case IIO_MAGN: /* in 0.1 uT / LSB */ ret = mag3110_read(data, buffer); if (ret < 0) return ret; *val = sign_extend32( be16_to_cpu(buffer[chan->scan_index]), 15); return IIO_VAL_INT; case IIO_TEMP: /* in 1 C / LSB */ 
mutex_lock(&data->lock); ret = mag3110_request(data); if (ret < 0) { mutex_unlock(&data->lock); return ret; } ret = i2c_smbus_read_byte_data(data->client, MAG3110_DIE_TEMP); mutex_unlock(&data->lock); if (ret < 0) return ret; *val = sign_extend32(ret, 7); return IIO_VAL_INT; default: return -EINVAL; } case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_MAGN: *val = 0; *val2 = 1000; return IIO_VAL_INT_PLUS_MICRO; case IIO_TEMP: *val = 1000; return IIO_VAL_INT; default: return -EINVAL; } case IIO_CHAN_INFO_SAMP_FREQ: i = data->ctrl_reg1 >> MAG3110_CTRL_DR_SHIFT; *val = mag3110_samp_freq[i][0]; *val2 = mag3110_samp_freq[i][1]; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_CALIBBIAS: ret = i2c_smbus_read_word_swapped(data->client, MAG3110_OFF_X + 2 * chan->scan_index); if (ret < 0) return ret; *val = sign_extend32(ret >> 1, 14); return IIO_VAL_INT; } return -EINVAL; } static int mag3110_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct mag3110_data *data = iio_priv(indio_dev); int rate; if (iio_buffer_enabled(indio_dev)) return -EBUSY; switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: rate = mag3110_get_samp_freq_index(data, val, val2); if (rate < 0) return -EINVAL; data->ctrl_reg1 &= ~MAG3110_CTRL_DR_MASK; data->ctrl_reg1 |= rate << MAG3110_CTRL_DR_SHIFT; return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1, data->ctrl_reg1); case IIO_CHAN_INFO_CALIBBIAS: if (val < -10000 || val > 10000) return -EINVAL; return i2c_smbus_write_word_swapped(data->client, MAG3110_OFF_X + 2 * chan->scan_index, val << 1); default: return -EINVAL; } } static irqreturn_t mag3110_trigger_handler(int irq, void *p) { struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mag3110_data *data = iio_priv(indio_dev); u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */ int ret; ret = mag3110_read(data, (__be16 *) buffer); if (ret < 0) goto done; if (test_bit(3, 
indio_dev->active_scan_mask)) { ret = i2c_smbus_read_byte_data(data->client, MAG3110_DIE_TEMP); if (ret < 0) goto done; buffer[6] = ret; } iio_push_to_buffers_with_timestamp(indio_dev, buffer, iio_get_time_ns()); done: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; } #define MAG3110_CHANNEL(axis, idx) { \ .type = IIO_MAGN, \ .modified = 1, \ .channel2 = IIO_MOD_##axis, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_CALIBBIAS), \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ BIT(IIO_CHAN_INFO_SCALE), \ .scan_index = idx, \ .scan_type = { \ .sign = 's', \ .realbits = 16, \ .storagebits = 16, \ .endianness = IIO_BE, \ }, \ } static const struct iio_chan_spec mag3110_channels[] = { MAG3110_CHANNEL(X, 0), MAG3110_CHANNEL(Y, 1), MAG3110_CHANNEL(Z, 2), { .type = IIO_TEMP, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), .scan_index = 3, .scan_type = { .sign = 's', .realbits = 8, .storagebits = 8, }, }, IIO_CHAN_SOFT_TIMESTAMP(4), }; static struct attribute *mag3110_attributes[] = { &iio_dev_attr_sampling_frequency_available.dev_attr.attr, NULL }; static const struct attribute_group mag3110_group = { .attrs = mag3110_attributes, }; static const struct iio_info mag3110_info = { .attrs = &mag3110_group, .read_raw = &mag3110_read_raw, .write_raw = &mag3110_write_raw, .driver_module = THIS_MODULE, }; static const unsigned long mag3110_scan_masks[] = {0x7, 0xf, 0}; static int mag3110_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct mag3110_data *data; struct iio_dev *indio_dev; int ret; ret = i2c_smbus_read_byte_data(client, MAG3110_WHO_AM_I); if (ret < 0) return ret; if (ret != MAG3110_DEVICE_ID) return -ENODEV; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; data = iio_priv(indio_dev); data->client = client; mutex_init(&data->lock); i2c_set_clientdata(client, indio_dev); indio_dev->info = &mag3110_info; indio_dev->name = 
id->name; indio_dev->dev.parent = &client->dev; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = mag3110_channels; indio_dev->num_channels = ARRAY_SIZE(mag3110_channels); indio_dev->available_scan_masks = mag3110_scan_masks; data->ctrl_reg1 = MAG3110_CTRL_DR_DEFAULT << MAG3110_CTRL_DR_SHIFT; ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG1, data->ctrl_reg1); if (ret < 0) return ret; ret = i2c_smbus_write_byte_data(client, MAG3110_CTRL_REG2, MAG3110_CTRL_AUTO_MRST_EN); if (ret < 0) return ret; ret = iio_triggered_buffer_setup(indio_dev, NULL, mag3110_trigger_handler, NULL); if (ret < 0) return ret; ret = iio_device_register(indio_dev); if (ret < 0) goto buffer_cleanup; return 0; buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); return ret; } static int mag3110_standby(struct mag3110_data *data) { return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1, data->ctrl_reg1 & ~MAG3110_CTRL_AC); } static int mag3110_remove(struct i2c_client *client) { struct iio_dev *indio_dev = i2c_get_clientdata(client); iio_device_unregister(indio_dev); iio_triggered_buffer_cleanup(indio_dev); mag3110_standby(iio_priv(indio_dev)); return 0; } #ifdef CONFIG_PM_SLEEP static int mag3110_suspend(struct device *dev) { return mag3110_standby(iio_priv(i2c_get_clientdata( to_i2c_client(dev)))); } static int mag3110_resume(struct device *dev) { struct mag3110_data *data = iio_priv(i2c_get_clientdata( to_i2c_client(dev))); return i2c_smbus_write_byte_data(data->client, MAG3110_CTRL_REG1, data->ctrl_reg1); } static SIMPLE_DEV_PM_OPS(mag3110_pm_ops, mag3110_suspend, mag3110_resume); #define MAG3110_PM_OPS (&mag3110_pm_ops) #else #define MAG3110_PM_OPS NULL #endif static const struct i2c_device_id mag3110_id[] = { { "mag3110", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mag3110_id); static struct i2c_driver mag3110_driver = { .driver = { .name = "mag3110", .pm = MAG3110_PM_OPS, }, .probe = mag3110_probe, .remove = mag3110_remove, .id_table = mag3110_id, }; 
module_i2c_driver(mag3110_driver); MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>"); MODULE_DESCRIPTION("Freescale MAG3110 magnetometer driver"); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/zte-kernel-msm7x27
fs/nilfs2/dat.c
939
12571
/* * dat.c - NILFS disk address translation. * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Written by Koji Sato <koji@osrg.net>. */ #include <linux/types.h> #include <linux/buffer_head.h> #include <linux/string.h> #include <linux/errno.h> #include "nilfs.h" #include "mdt.h" #include "alloc.h" #include "dat.h" #define NILFS_CNO_MIN ((__u64)1) #define NILFS_CNO_MAX (~(__u64)0) struct nilfs_dat_info { struct nilfs_mdt_info mi; struct nilfs_palloc_cache palloc_cache; }; static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat) { return (struct nilfs_dat_info *)NILFS_MDT(dat); } static int nilfs_dat_prepare_entry(struct inode *dat, struct nilfs_palloc_req *req, int create) { return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, create, &req->pr_entry_bh); } static void nilfs_dat_commit_entry(struct inode *dat, struct nilfs_palloc_req *req) { nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh); nilfs_mdt_mark_dirty(dat); brelse(req->pr_entry_bh); } static void nilfs_dat_abort_entry(struct inode *dat, struct nilfs_palloc_req *req) { brelse(req->pr_entry_bh); } int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req) { int ret; ret = nilfs_palloc_prepare_alloc_entry(dat, req); if (ret < 0) return ret; ret = 
nilfs_dat_prepare_entry(dat, req, 1); if (ret < 0) nilfs_palloc_abort_alloc_entry(dat, req); return ret; } void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MAX); entry->de_blocknr = cpu_to_le64(0); kunmap_atomic(kaddr, KM_USER0); nilfs_palloc_commit_alloc_entry(dat, req); nilfs_dat_commit_entry(dat, req); } void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req) { nilfs_dat_abort_entry(dat, req); nilfs_palloc_abort_alloc_entry(dat, req); } void nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MIN); entry->de_blocknr = cpu_to_le64(0); kunmap_atomic(kaddr, KM_USER0); nilfs_dat_commit_entry(dat, req); nilfs_palloc_commit_free_entry(dat, req); } int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req) { int ret; ret = nilfs_dat_prepare_entry(dat, req, 0); WARN_ON(ret == -ENOENT); return ret; } void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, sector_t blocknr) { struct nilfs_dat_entry *entry; void *kaddr; kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, req->pr_entry_bh, kaddr); entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); entry->de_blocknr = cpu_to_le64(blocknr); kunmap_atomic(kaddr, KM_USER0); nilfs_dat_commit_entry(dat, req); } int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) { struct nilfs_dat_entry *entry; __u64 
start;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		/* the entry is expected to exist here; -ENOENT is a logic bug */
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0) {
		/*
		 * The entry was never assigned an on-disk block, so ending
		 * it will free it; prepare the free as well.
		 */
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

/*
 * nilfs_dat_commit_end - commit the "end" of a DAT entry's lifetime
 * @dat: DAT file inode
 * @req: palloc request prepared by nilfs_dat_prepare_end()
 * @dead: nonzero if the entry dies retroactively (de_end stays at de_start)
 *
 * When @dead is zero the end checkpoint number is set to the current
 * checkpoint; entries that never received a disk block are freed instead
 * of being committed.
 */
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		/* a live entry must not end before it started */
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

/*
 * nilfs_dat_abort_end - abort a prepared "end" operation
 * @dat: DAT file inode
 * @req: palloc request prepared by nilfs_dat_prepare_end()
 *
 * Undoes the preparation done by nilfs_dat_prepare_end(), including the
 * free-entry preparation made for entries without a disk block.
 */
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

/*
 * nilfs_dat_prepare_update - prepare an end + alloc pair atomically
 * @dat: DAT file inode
 * @oldreq: request for the entry being ended
 * @newreq: request for the replacement entry
 *
 * On failure of the allocation the already-prepared end is rolled back,
 * so either both operations are prepared or neither is.
 */
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

/*
 * nilfs_dat_commit_update - commit a prepared end + alloc pair
 * @dat: DAT file inode
 * @oldreq: request for the entry being ended
 * @newreq: request for the replacement entry
 * @dead: passed through to nilfs_dat_commit_end()
 */
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

/*
 * nilfs_dat_abort_update - abort a prepared end + alloc pair
 * @dat: DAT file inode
 * @oldreq: request for the entry being ended
 * @newreq: request for the replacement entry
 */
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

/**
 * nilfs_dat_mark_dirty -
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: looks up the DAT entry block holding @vblocknr and commits
 * it, marking the entry block dirty.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block number have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		/* moving an unallocated entry indicates corruption */
		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
		       (unsigned long long)vblocknr,
		       (unsigned long long)le64_to_cpu(entry->de_start),
		       (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr, KM_USER0);

	nilfs_mdt_mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;
	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr, KM_USER0);
	brelse(entry_bh);
	return ret;
}

/*
 * nilfs_dat_get_vinfo - fill an array of nilfs_vinfo from DAT entries
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo records (stride @visz bytes, vi_vblocknr set)
 * @visz: size of one record in @buf
 * @nvi: number of records
 *
 * Entries are grouped per on-disk entry block so each block is read and
 * mapped only once; the records are assumed to be sorted by vi_vblocknr
 * within a block.  Returns @nvi on success or a negative error code.
 */
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
		/* last virtual block number in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr, KM_USER0);
		brelse(entry_bh);
	}

	return nvi;
}

/**
 * nilfs_dat_read - read dat inode
 * @dat: dat inode
 * @raw_inode: on-disk dat inode
 */
int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
{
	return nilfs_read_inode_common(dat, raw_inode);
}

/**
 * nilfs_dat_new - create dat file
 * @nilfs: nilfs object
 * @entry_size: size of a dat entry
 */
struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
	if (dat) {
		err = nilfs_palloc_init_blockgroup(dat, entry_size);
		if (unlikely(err)) {
			nilfs_mdt_destroy(dat);
			return NULL;
		}

		di = NILFS_DAT_I(dat);
		/*
		 * The DAT takes its semaphore under other metadata files'
		 * semaphores; a dedicated lockdep class avoids false
		 * recursive-locking reports.
		 */
		lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
		nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	}
	return dat;
}
gpl-2.0
longman88/kernel-qspinlock-v10
drivers/gpu/drm/nouveau/core/subdev/bios/boost.c
939
3587
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <subdev/bios.h>
#include <subdev/bios/bit.h>
#include <subdev/bios/boost.h>

/* Locate the boost table via BIT 'P' (version 2) and decode its header.
 * Only table version 0x11 is understood; returns the table offset, or
 * 0x0000 when absent/unsupported.  On success fills header size, entry
 * count/size and per-entry sub-entry count/size.
 */
u16
nvbios_boostTe(struct nouveau_bios *bios,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
{
	struct bit_entry bit_p;
	u16 offset;

	if (bit_entry(bios, 'P', &bit_p) || bit_p.version != 2)
		return 0x0000;

	offset = nv_ro16(bios, bit_p.offset + 0x30);
	if (!offset)
		return 0x0000;

	*ver = nv_ro08(bios, offset + 0);
	if (*ver != 0x11)
		return 0x0000;

	*hdr = nv_ro08(bios, offset + 1);
	*cnt = nv_ro08(bios, offset + 5);
	*len = nv_ro08(bios, offset + 2);
	*snr = nv_ro08(bios, offset + 4);
	*ssz = nv_ro08(bios, offset + 3);
	return offset;
}

/* Offset of entry @idx; on success *hdr/*cnt/*len are rewritten to
 * describe that entry's sub-entry layout.
 */
u16
nvbios_boostEe(struct nouveau_bios *bios, int idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
	u8 snr, ssz;
	u16 table = nvbios_boostTe(bios, ver, hdr, cnt, len, &snr, &ssz);

	if (!table || idx >= *cnt)
		return 0x0000;

	table += *hdr + idx * (*len + snr * ssz);
	*hdr = *len;
	*cnt = snr;
	*len = ssz;
	return table;
}

/* Parse entry @idx into @info (pstate index plus min/max clock in kHz). */
u16
nvbios_boostEp(struct nouveau_bios *bios, int idx,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
	u16 entry = nvbios_boostEe(bios, idx, ver, hdr, cnt, len);

	memset(info, 0x00, sizeof(*info));
	if (entry) {
		info->pstate = (nv_ro16(bios, entry + 0x00) & 0x01e0) >> 5;
		info->min    = nv_ro16(bios, entry + 0x02) * 1000;
		info->max    = nv_ro16(bios, entry + 0x04) * 1000;
	}
	return entry;
}

/* Scan entries for the one matching @pstate; 0x0000 if none matches. */
u16
nvbios_boostEm(struct nouveau_bios *bios, u8 pstate,
	       u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_boostE *info)
{
	u32 entry;
	u32 idx = 0;

	for (;;) {
		entry = nvbios_boostEp(bios, idx++, ver, hdr, cnt, len, info);
		if (!entry || info->pstate == pstate)
			break;
	}
	return entry;
}

/* Offset of sub-entry @idx within entry @data (layout from nvbios_boostEe). */
u16
nvbios_boostSe(struct nouveau_bios *bios, int idx,
	       u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len)
{
	if (!data || idx >= cnt)
		return 0x0000;

	data += *hdr + idx * len;
	*hdr = len;
	return data;
}

/* Parse sub-entry @idx into @info (clock domain, percentage, min/max kHz). */
u16
nvbios_boostSp(struct nouveau_bios *bios, int idx,
	       u16 data, u8 *ver, u8 *hdr, u8 cnt, u8 len,
	       struct nvbios_boostS *info)
{
	u16 entry = nvbios_boostSe(bios, idx, data, ver, hdr, cnt, len);

	memset(info, 0x00, sizeof(*info));
	if (entry) {
		info->domain  = nv_ro08(bios, entry + 0x00);
		info->percent = nv_ro08(bios, entry + 0x01);
		info->min     = nv_ro16(bios, entry + 0x02) * 1000;
		info->max     = nv_ro16(bios, entry + 0x04) * 1000;
	}
	return entry;
}
gpl-2.0
fanyukui/linux3.12.10
drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
939
6677
/*
 * Copyright 1993-2003 NVIDIA, Corporation
 * Copyright 2007-2009 Stuart Bennett
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <subdev/bios.h>
#include <subdev/bios/pll.h>

#include "pll.h"

/* Exhaustive M/N/P search for a single-stage PLL; best (closest) settings
 * are written through pN/pM/pP and the achieved clock is returned (0 if no
 * valid combination was found).
 */
static int
getMNP_single(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
	      int *pN, int *pM, int *pP)
{
	/* Find M, N and P for a single stage PLL
	 *
	 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
	 * values, but we're too lazy to use those atm
	 *
	 * "clk" parameter in kHz
	 * returns calculated clock
	 */
	struct nouveau_bios *bios = nouveau_bios(subdev);
	int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
	int minM = info->vco1.min_m, maxM = info->vco1.max_m;
	int minN = info->vco1.min_n, maxN = info->vco1.max_n;
	int minU = info->vco1.min_inputfreq;
	int maxU = info->vco1.max_inputfreq;
	int minP = info->min_p;
	int maxP = info->max_p_usable;
	int crystal = info->refclk;
	int M, N, thisP, P;
	int clkP, calcclk;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;

	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
	/* possibly correlated with introduction of 27MHz crystal */
	if (bios->version.major < 0x60) {
		int cv = bios->version.chip;
		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
			if (clk > 250000)
				maxM = 6;
			if (clk > 340000)
				maxM = 2;
		} else if (cv < 0x40) {
			if (clk > 150000)
				maxM = 6;
			if (clk > 200000)
				maxM = 4;
			if (clk > 340000)
				maxM = 2;
		}
	}

	/* widen the VCO window when the target is below the nominal range */
	P = 1 << maxP;
	if ((clk * P) < minvco) {
		minvco = clk * maxP;
		maxvco = minvco * 2;
	}

	if (clk + clk/200 > maxvco)	/* +0.5% */
		maxvco = clk + clk/200;

	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
	for (thisP = minP; thisP <= maxP; thisP++) {
		P = 1 << thisP;
		clkP = clk * P;

		if (clkP < minvco)
			continue;
		if (clkP > maxvco)
			return bestclk;

		for (M = minM; M <= maxM; M++) {
			/* input frequency (crystal/M) must stay in range;
			 * too low only gets worse as M grows, so give up */
			if (crystal/M < minU)
				return bestclk;
			if (crystal/M > maxU)
				continue;

			/* add crystal/2 to round better */
			N = (clkP * M + crystal/2) / crystal;

			if (N < minN)
				continue;
			if (N > maxN)
				break;

			/* more rounding additions */
			calcclk = ((N * crystal + P/2) / P + M/2) / M;
			delta = abs(calcclk - clk);
			/* we do an exhaustive search rather than terminating
			 * on an optimality condition...
			 */
			if (delta < bestdelta) {
				bestdelta = delta;
				bestclk = calcclk;
				*pN = N;
				*pM = M;
				*pP = thisP;
				if (delta == 0)	/* except this one */
					return bestclk;
			}
		}
	}

	return bestclk;
}

/* Exhaustive search for a two-stage (cascaded VCO) PLL; best settings are
 * written through pN1/pM1/pN2/pM2/pP and the achieved clock is returned
 * (0 if no valid combination was found).
 */
static int
getMNP_double(struct nouveau_subdev *subdev, struct nvbios_pll *info, int clk,
	      int *pN1, int *pM1, int *pN2, int *pM2, int *pP)
{
	/* Find M, N and P for a two stage PLL
	 *
	 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
	 * values, but we're too lazy to use those atm
	 *
	 * "clk" parameter in kHz
	 * returns calculated clock
	 */
	int chip_version = nouveau_bios(subdev)->version.chip;
	int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
	int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
	int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
	int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq;
	int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m;
	int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n;
	int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m;
	int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n;
	int maxlog2P = info->max_p_usable;
	int crystal = info->refclk;
	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
	int M1, N1, M2, N2, log2P;
	int clkP, calcclk1, calcclk2, calcclkout;
	int delta, bestdelta = INT_MAX;
	int bestclk = 0;

	/* pick the largest post-divider that keeps the second VCO in range */
	int vco2 = (maxvco2 - maxvco2/200) / 2;
	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
		;
	clkP = clk << log2P;

	if (maxvco2 < clk + clk/200)	/* +0.5% */
		maxvco2 = clk + clk/200;

	for (M1 = minM1; M1 <= maxM1; M1++) {
		if (crystal/M1 < minU1)
			return bestclk;
		if (crystal/M1 > maxU1)
			continue;

		for (N1 = minN1; N1 <= maxN1; N1++) {
			calcclk1 = crystal * N1 / M1;
			if (calcclk1 < minvco1)
				continue;
			if (calcclk1 > maxvco1)
				break;

			for (M2 = minM2; M2 <= maxM2; M2++) {
				if (calcclk1/M2 < minU2)
					break;
				if (calcclk1/M2 > maxU2)
					continue;

				/* add calcclk1/2 to round better */
				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
				if (N2 < minN2)
					continue;
				if (N2 > maxN2)
					break;

				if (!fixedgain2) {
					/* second-stage gain limit on older
					 * chips */
					if (chip_version < 0x60)
						if (N2/M2 < 4 || N2/M2 > 10)
							continue;

					calcclk2 = calcclk1 * N2 / M2;
					if (calcclk2 < minvco2)
						break;
					if (calcclk2 > maxvco2)
						continue;
				} else
					calcclk2 = calcclk1;

				calcclkout = calcclk2 >> log2P;

				delta = abs(calcclkout - clk);
				/* we do an exhaustive search rather than terminating
				 * on an optimality condition...
				 */
				if (delta < bestdelta) {
					bestdelta = delta;
					bestclk = calcclkout;
					*pN1 = N1;
					*pM1 = M1;
					*pN2 = N2;
					*pM2 = M2;
					*pP = log2P;
					if (delta == 0)	/* except this one */
						return bestclk;
				}
			}
		}
	}

	return bestclk;
}

/* Compute NV04-style PLL coefficients for @freq (kHz).  Uses the two-stage
 * search when the PLL has a second VCO and the caller wants its values;
 * otherwise the single-stage search with N2/M2 forced to 1 when requested.
 * Returns the achieved clock, or 0 (after logging) on failure.
 */
int
nv04_pll_calc(struct nouveau_subdev *subdev, struct nvbios_pll *info, u32 freq,
	      int *N1, int *M1, int *N2, int *M2, int *P)
{
	int ret;

	if (!info->vco2.max_freq || !N2) {
		ret = getMNP_single(subdev, info, freq, N1, M1, P);
		if (N2) {
			*N2 = 1;
			*M2 = 1;
		}
	} else {
		ret = getMNP_double(subdev, info, freq, N1, M1, N2, M2, P);
	}

	if (!ret)
		nv_error(subdev, "unable to compute acceptable pll values\n");
	return ret;
}
gpl-2.0
longman88/kernel-qspinlock-v13
arch/arm/mach-orion5x/common.c
1451
10762
/*
 * arch/arm/mach-orion5x/common.c
 *
 * Core functions for Marvell Orion 5x SoCs
 *
 * Maintainer: Tzachi Perelstein <tzachi@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
#include <linux/mv643xx_i2c.h>
#include <linux/ata_platform.h>
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <net/dsa.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/system_misc.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <mach/bridge-regs.h>
#include <mach/hardware.h>
#include <mach/orion5x.h>
#include <linux/platform_data/mtd-orion_nand.h>
#include <linux/platform_data/usb-ehci-orion.h>
#include <plat/time.h>
#include <plat/common.h>
#include "common.h"

/*****************************************************************************
 * I/O Address Mapping
 ****************************************************************************/
static struct map_desc orion5x_io_desc[] __initdata = {
	{
		.virtual	= (unsigned long) ORION5X_REGS_VIRT_BASE,
		.pfn		= __phys_to_pfn(ORION5X_REGS_PHYS_BASE),
		.length		= ORION5X_REGS_SIZE,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long) ORION5X_PCIE_WA_VIRT_BASE,
		.pfn		= __phys_to_pfn(ORION5X_PCIE_WA_PHYS_BASE),
		.length		= ORION5X_PCIE_WA_SIZE,
		.type		= MT_DEVICE,
	},
};

void __init orion5x_map_io(void)
{
	iotable_init(orion5x_io_desc, ARRAY_SIZE(orion5x_io_desc));
}


/*****************************************************************************
 * CLK tree
 ****************************************************************************/
static struct clk *tclk;

/* Register the fixed-rate "tclk" root clock and set up clkdev lookups.
 * orion5x_tclk must have been determined (orion5x_timer_init) before this
 * runs.
 */
void __init clk_init(void)
{
	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
				       orion5x_tclk);

	orion_clkdev_init(tclk);
}

/*****************************************************************************
 * EHCI0
 ****************************************************************************/
void __init orion5x_ehci0_init(void)
{
	orion_ehci_init(ORION5X_USB0_PHYS_BASE, IRQ_ORION5X_USB0_CTRL,
			EHCI_PHY_ORION);
}


/*****************************************************************************
 * EHCI1
 ****************************************************************************/
void __init orion5x_ehci1_init(void)
{
	orion_ehci_1_init(ORION5X_USB1_PHYS_BASE, IRQ_ORION5X_USB1_CTRL);
}


/*****************************************************************************
 * GE00
 ****************************************************************************/
void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
{
	orion_ge00_init(eth_data,
			ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
			IRQ_ORION5X_ETH_ERR,
			MV643XX_TX_CSUM_DEFAULT_LIMIT);
}


/*****************************************************************************
 * Ethernet switch
 ****************************************************************************/
void __init orion5x_eth_switch_init(struct dsa_platform_data *d, int irq)
{
	orion_ge00_switch_init(d, irq);
}


/*****************************************************************************
 * I2C
 ****************************************************************************/
void __init orion5x_i2c_init(void)
{
	orion_i2c_init(I2C_PHYS_BASE, IRQ_ORION5X_I2C, 8);
}


/*****************************************************************************
 * SATA
 ****************************************************************************/
void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
{
	orion_sata_init(sata_data, ORION5X_SATA_PHYS_BASE, IRQ_ORION5X_SATA);
}


/*****************************************************************************
 * SPI
 ****************************************************************************/
void __init orion5x_spi_init(void)
{
	orion_spi_init(SPI_PHYS_BASE);
}


/*****************************************************************************
 * UART0
 ****************************************************************************/
void __init orion5x_uart0_init(void)
{
	orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
			 IRQ_ORION5X_UART0, tclk);
}

/*****************************************************************************
 * UART1
 ****************************************************************************/
void __init orion5x_uart1_init(void)
{
	orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
			 IRQ_ORION5X_UART1, tclk);
}

/*****************************************************************************
 * XOR engine
 ****************************************************************************/
void __init orion5x_xor_init(void)
{
	orion_xor0_init(ORION5X_XOR_PHYS_BASE,
			ORION5X_XOR_PHYS_BASE + 0x200,
			IRQ_ORION5X_XOR0, IRQ_ORION5X_XOR1);
}

/*****************************************************************************
 * Cryptographic Engines and Security Accelerator (CESA)
 ****************************************************************************/
static void __init orion5x_crypto_init(void)
{
	/* the crypto engine needs the internal SRAM mapped through an
	 * MBus window before it can be used */
	mvebu_mbus_add_window_by_id(ORION_MBUS_SRAM_TARGET,
				    ORION_MBUS_SRAM_ATTR,
				    ORION5X_SRAM_PHYS_BASE,
				    ORION5X_SRAM_SIZE);
	orion_crypto_init(ORION5X_CRYPTO_PHYS_BASE, ORION5X_SRAM_PHYS_BASE,
			  SZ_8K, IRQ_ORION5X_CESA);
}

/*****************************************************************************
 * Watchdog
 ****************************************************************************/
static void __init orion5x_wdt_init(void)
{
	orion_wdt_init();
}


/*****************************************************************************
 * Time handling
 ****************************************************************************/
void __init orion5x_init_early(void)
{
	u32 rev, dev;
	const char *mbus_soc_name;

	orion_time_set_base(TIMER_VIRT_BASE);

	/* Initialize the MBUS driver */
	orion5x_pcie_id(&dev, &rev);
	if (dev == MV88F5281_DEV_ID)
		mbus_soc_name = "marvell,orion5x-88f5281-mbus";
	else if (dev == MV88F5182_DEV_ID)
		mbus_soc_name = "marvell,orion5x-88f5182-mbus";
	else if (dev == MV88F5181_DEV_ID)
		mbus_soc_name = "marvell,orion5x-88f5181-mbus";
	else if (dev == MV88F6183_DEV_ID)
		mbus_soc_name = "marvell,orion5x-88f6183-mbus";
	else
		mbus_soc_name = NULL;
	mvebu_mbus_init(mbus_soc_name, ORION5X_BRIDGE_WINS_BASE,
			ORION5X_BRIDGE_WINS_SZ,
			ORION5X_DDR_WINS_BASE, ORION5X_DDR_WINS_SZ);
}

void orion5x_setup_wins(void)
{
	/*
	 * The PCIe windows will no longer be statically allocated
	 * here once Orion5x is migrated to the pci-mvebu driver.
	 */
	mvebu_mbus_add_window_remap_by_id(ORION_MBUS_PCIE_IO_TARGET,
					  ORION_MBUS_PCIE_IO_ATTR,
					  ORION5X_PCIE_IO_PHYS_BASE,
					  ORION5X_PCIE_IO_SIZE,
					  ORION5X_PCIE_IO_BUS_BASE);
	mvebu_mbus_add_window_by_id(ORION_MBUS_PCIE_MEM_TARGET,
				    ORION_MBUS_PCIE_MEM_ATTR,
				    ORION5X_PCIE_MEM_PHYS_BASE,
				    ORION5X_PCIE_MEM_SIZE);
	mvebu_mbus_add_window_remap_by_id(ORION_MBUS_PCI_IO_TARGET,
					  ORION_MBUS_PCI_IO_ATTR,
					  ORION5X_PCI_IO_PHYS_BASE,
					  ORION5X_PCI_IO_SIZE,
					  ORION5X_PCI_IO_BUS_BASE);
	mvebu_mbus_add_window_by_id(ORION_MBUS_PCI_MEM_TARGET,
				    ORION_MBUS_PCI_MEM_ATTR,
				    ORION5X_PCI_MEM_PHYS_BASE,
				    ORION5X_PCI_MEM_SIZE);
}

int orion5x_tclk;

/* Probe the core clock rate: 88F6183 with the relevant strap cleared runs
 * at 133 MHz, everything else at 166 MHz.
 */
static int __init orion5x_find_tclk(void)
{
	u32 dev, rev;

	orion5x_pcie_id(&dev, &rev);
	if (dev == MV88F6183_DEV_ID &&
	    (readl(MPP_RESET_SAMPLE) & 0x00000200) == 0)
		return 133333333;

	return 166666667;
}

void __init orion5x_timer_init(void)
{
	orion5x_tclk = orion5x_find_tclk();

	orion_time_init(ORION5X_BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
			IRQ_ORION5X_BRIDGE, orion5x_tclk);
}


/*****************************************************************************
 * General
 ****************************************************************************/

/*
 * Identify device ID and rev from PCIe configuration header space '0'.
 */
void __init orion5x_id(u32 *dev, u32 *rev, char **dev_name)
{
	orion5x_pcie_id(dev, rev);

	if (*dev == MV88F5281_DEV_ID) {
		if (*rev == MV88F5281_REV_D2) {
			*dev_name = "MV88F5281-D2";
		} else if (*rev == MV88F5281_REV_D1) {
			*dev_name = "MV88F5281-D1";
		} else if (*rev == MV88F5281_REV_D0) {
			*dev_name = "MV88F5281-D0";
		} else {
			*dev_name = "MV88F5281-Rev-Unsupported";
		}
	} else if (*dev == MV88F5182_DEV_ID) {
		if (*rev == MV88F5182_REV_A2) {
			*dev_name = "MV88F5182-A2";
		} else {
			*dev_name = "MV88F5182-Rev-Unsupported";
		}
	} else if (*dev == MV88F5181_DEV_ID) {
		if (*rev == MV88F5181_REV_B1) {
			*dev_name = "MV88F5181-Rev-B1";
		} else if (*rev == MV88F5181L_REV_A1) {
			*dev_name = "MV88F5181L-Rev-A1";
		} else {
			*dev_name = "MV88F5181(L)-Rev-Unsupported";
		}
	} else if (*dev == MV88F6183_DEV_ID) {
		if (*rev == MV88F6183_REV_B0) {
			*dev_name = "MV88F6183-Rev-B0";
		} else {
			*dev_name = "MV88F6183-Rev-Unsupported";
		}
	} else {
		*dev_name = "Device-Unknown";
	}
}

void __init orion5x_init(void)
{
	char *dev_name;
	u32 dev, rev;

	orion5x_id(&dev, &rev, &dev_name);
	printk(KERN_INFO "Orion ID: %s. TCLK=%d.\n", dev_name, orion5x_tclk);

	/*
	 * Setup Orion address map
	 */
	orion5x_setup_wins();

	/* Setup root of clk tree */
	clk_init();

	/*
	 * Don't issue "Wait for Interrupt" instruction if we are
	 * running on D0 5281 silicon.
	 */
	if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) {
		printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n");
		cpu_idle_poll_ctrl(true);
	}

	/*
	 * The 5082/5181l/5182/6082/6082l/6183 have crypto
	 * while 5180n/5181/5281 don't have crypto.
	 */
	if ((dev == MV88F5181_DEV_ID && rev >= MV88F5181L_REV_A0) ||
	    dev == MV88F5182_DEV_ID ||
	    dev == MV88F6183_DEV_ID)
		orion5x_crypto_init();

	/*
	 * Register watchdog driver
	 */
	orion5x_wdt_init();
}

void orion5x_restart(enum reboot_mode mode, const char *cmd)
{
	/*
	 * Enable and issue soft reset
	 */
	orion5x_setbits(RSTOUTn_MASK, (1 << 2));
	orion5x_setbits(CPU_SOFT_RESET, 1);
	mdelay(200);
	orion5x_clrbits(CPU_SOFT_RESET, 1);
}

/*
 * Many orion-based systems have buggy bootloader implementations.
 * This is a common fixup for bogus memory tags.
 */
void __init tag_fixup_mem32(struct tag *t, char **from)
{
	for (; t->hdr.size; t = tag_next(t))
		if (t->hdr.tag == ATAG_MEM &&
		    (!t->u.mem.size || t->u.mem.size & ~PAGE_MASK ||
		     t->u.mem.start & ~PAGE_MASK)) {
			printk(KERN_WARNING
			       "Clearing invalid memory bank %dKB@0x%08x\n",
			       t->u.mem.size / 1024, t->u.mem.start);
			t->hdr.tag = 0;
		}
}
gpl-2.0
YoungjaeLee/linux-4.3-cxlbdev
arch/powerpc/platforms/cell/spu_manage.c
1963
13271
/*
 * spu management operations for of based platforms
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 * Copyright 2006 Sony Corp.
 * (C) Copyright 2007 TOSHIBA CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/prom.h>

#include "spufs/spufs.h"
#include "interrupt.h"

struct device_node *spu_devnode(struct spu *spu)
{
	return spu->devnode;
}

EXPORT_SYMBOL_GPL(spu_devnode);

/* Extract the SPE unit number from whichever device-tree property this
 * firmware generation provides; 0 if none is present.
 */
static u64 __init find_spu_unit_number(struct device_node *spe)
{
	const unsigned int *prop;
	int proplen;

	/* new device trees should provide the physical-id attribute */
	prop = of_get_property(spe, "physical-id", &proplen);
	if (proplen == 4)
		return (u64)*prop;

	/* celleb device tree provides the unit-id */
	prop = of_get_property(spe, "unit-id", &proplen);
	if (proplen == 4)
		return (u64)*prop;

	/* legacy device trees provide the id in the reg attribute */
	prop = of_get_property(spe, "reg", &proplen);
	if (proplen == 4)
		return (u64)*prop;

	return 0;
}

/* Undo the iomaps done by spu_map_device()/spu_map_device_old().
 * priv1 is only mapped on non-LPAR firmware, matching the map side.
 */
static void spu_unmap(struct spu *spu)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		iounmap(spu->priv1);
	iounmap(spu->priv2);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

/* Legacy (old device tree) interrupt setup: build the IIC interrupt source
 * number from the "isrc" property plus the node id, then map all three
 * interrupt classes.
 */
static int __init spu_map_interrupts_old(struct spu *spu,
	struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;
	int nid;

	/* Get the interrupt source unit from the device-tree */
	tmp = of_get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	tmp = of_get_property(np->parent->parent, "node-id", NULL);
	if (!tmp) {
		printk(KERN_WARNING "%s: can't find node-id\n", __func__);
		nid = spu->node;
	} else
		nid = tmp[0];

	/* Add the node number */
	isrc |= nid << IIC_IRQ_NODE_SHIFT;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

/* Legacy register area mapping: the old trees encode each area as a
 * packed (address, length) property rather than a "reg" resource.
 */
static void __iomem * __init spu_map_prop_old(struct spu *spu,
					      struct device_node *n,
					      const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;
	int proplen;

	prop = of_get_property(n, name, &proplen);
	if (prop == NULL || proplen != sizeof (struct address_prop))
		return NULL;

	return ioremap(prop->address, prop->len);
}

/* Map all SPU register areas using the legacy per-area properties
 * (local-store, problem, priv2, and priv1 when not running under an LPAR).
 */
static int __init spu_map_device_old(struct spu *spu)
{
	struct device_node *node = spu->devnode;
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = of_get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = of_get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		spu_map_prop_old(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = of_get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = spu_map_prop_old(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv2 = spu_map_prop_old(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		spu->priv1 = spu_map_prop_old(spu, node, "priv1");
		if (!spu->priv1)
			goto out_unmap;
	}

	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

/* Current-style interrupt setup: parse and map the three interrupt
 * specifiers from the SPE node, disposing of any partial mappings on
 * failure.
 */
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_phandle_args oirq;
	int ret;
	int i;

	for (i=0; i < 3; i++) {
		ret = of_irq_parse_one(np, i, &oirq);
		if (ret) {
			pr_debug("spu_new: failed to get irq %d\n", i);
			goto err;
		}
		ret = -EINVAL;
		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.args[0],
			 oirq.np->full_name);
		spu->irqs[i] = irq_create_of_mapping(&oirq);
		if (spu->irqs[i] == NO_IRQ) {
			pr_debug("spu_new: failed to map it !\n");
			goto err;
		}
	}
	return 0;

err:
	pr_debug("failed to map irq %x for spu %s\n", *oirq.args,
		spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}

/* Map "reg" resource number @nr of the SPE node into *@virt, optionally
 * reporting its physical address via @phys.
 */
static int spu_map_resource(struct spu *spu, int nr,
			    void __iomem** virt, unsigned long *phys)
{
	struct device_node *np = spu->devnode;
	struct resource resource = { };
	unsigned long len;
	int ret;

	ret = of_address_to_resource(np, nr, &resource);
	if (ret)
		return ret;
	if (phys)
		*phys = resource.start;
	len = resource_size(&resource);
	*virt = ioremap(resource.start, len);
	if (!*virt)
		return -EINVAL;
	return 0;
}

/* Current-style register mapping: resources 0-3 are local store, problem
 * state, priv2 and priv1 (priv1 skipped under LPAR firmware).
 */
static int __init spu_map_device(struct spu *spu)
{
	struct device_node *np = spu->devnode;
	int ret = -ENODEV;

	spu->name = of_get_property(np, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store,
			       &spu->local_store_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 0\n",
			 np->full_name);
		goto out;
	}
	ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem,
			       &spu->problem_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 1\n",
			 np->full_name);
		goto out_unmap;
	}
	ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 2\n",
			 np->full_name);
		goto out_unmap;
	}
	/* ret is 0 here, so the check below only fires for the priv1 map */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(spu, 3,
			       (void __iomem**)&spu->priv1, NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 3\n",
			 np->full_name);
		goto out_unmap;
	}
	pr_debug("spu_new: %s maps:\n", np->full_name);
	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
		 spu->local_store_phys, spu->local_store);
	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
		 spu->problem_phys, spu->problem);
	pr_debug("  priv2         :                       0x%p\n", spu->priv2);
	pr_debug("  priv1         :                       0x%p\n", spu->priv1);

	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}

/* Walk all "spe" nodes, invoking @fn on each; returns the number of SPEs
 * found, or the first error from @fn.
 */
static int __init of_enumerate_spus(int (*fn)(void *data))
{
	int ret;
	struct device_node *node;
	unsigned int n = 0;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = fn(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			break;
		}
		n++;
	}
	return ret ? ret : n;
}

/* Fill in a struct spu from its device-tree node, falling back to the
 * legacy mapping/interrupt schemes when the current-style ones fail.
 */
static int __init of_create_spu(struct spu *spu, void *data)
{
	int ret;
	struct device_node *spe = (struct device_node *)data;
	static int legacy_map = 0, legacy_irq = 0;

	spu->devnode = of_node_get(spe);
	spu->spe_id = find_spu_unit_number(spe);

	spu->node = of_node_to_nid(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %s on node %d ignored,"
		       " node number too big\n", spe->full_name, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		ret = -ENODEV;
		goto out;
	}

	ret = spu_map_device(spu);
	if (ret) {
		if (!legacy_map) {
			legacy_map = 1;
			printk(KERN_WARNING "%s: Legacy device tree found, "
				"trying to map old style\n", __func__);
		}
		ret = spu_map_device_old(spu);
		if (ret) {
			printk(KERN_ERR "Unable to map %s\n",
				spu->name);
			goto out;
		}
	}

	ret = spu_map_interrupts(spu, spe);
	if (ret) {
		if (!legacy_irq) {
			legacy_irq = 1;
			printk(KERN_WARNING "%s: Legacy device tree found, "
				"trying old style irq\n", __func__);
		}
		ret = spu_map_interrupts_old(spu, spe);
		if (ret) {
			printk(KERN_ERR "%s: could not map interrupts\n",
				spu->name);
			goto out_unmap;
		}
	}

	pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
		spu->local_store, spu->problem, spu->priv1,
		spu->priv2, spu->number);
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int of_destroy_spu(struct spu *spu)
{
	spu_unmap(spu);
	of_node_put(spu->devnode);
	return 0;
}

static void enable_spu_by_master_run(struct spu_context *ctx)
{
	ctx->ops->master_start(ctx);
}

static void disable_spu_by_master_run(struct spu_context *ctx)
{
	ctx->ops->master_stop(ctx);
}

/* Hardcoded affinity idxs for qs20 */
#define QS20_SPES_PER_BE 8
static int qs20_reg_idxs[QS20_SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };

/* Find the SPU on @node whose "reg" property equals @reg; NULL if absent. */
static struct spu *spu_lookup_reg(int node, u32 reg)
{
	struct spu *spu;
	const u32 *spu_reg;

	list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
		spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
		if (*spu_reg == reg)
			return spu;
	}
	return NULL;
}

/* Build the per-node SPU affinity lists from the hardcoded QS20 tables
 * (used when the device tree carries no vicinity information).
 */
static void init_affinity_qs20_harcoded(void)
{
	int node, i;
	struct spu *last_spu, *spu;
	u32 reg;

	for (node = 0; node < MAX_NUMNODES; node++) {
		last_spu = NULL;
		for (i = 0; i < QS20_SPES_PER_BE; i++) {
			reg = qs20_reg_idxs[i];
			spu = spu_lookup_reg(node, reg);
			if (!spu)
				continue;
			spu->has_mem_affinity = qs20_reg_memory[reg];
			if (last_spu)
				list_add_tail(&spu->aff_list,
						&last_spu->aff_list);
			last_spu = spu;
		}
	}
}

/* Nonzero when at least one "spe" node carries a "vicinity" property. */
static int of_has_vicinity(void)
{
	struct device_node *dn;

	for_each_node_by_type(dn, "spe") {
		if (of_find_property(dn, "vicinity", NULL))  {
			of_node_put(dn);
			return 1;
		}
	}
	return 0;
}

/* Find the SPU on BE @cbe backed by device node @dn; NULL if none. */
static struct spu *devnode_spu(int cbe, struct device_node *dn)
{
	struct spu *spu;

	list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
		if (spu_devnode(spu) == dn)
			return spu;
	return NULL;
}

/* Find an SPU (other than the one behind @avoid) whose vicinity list
 * contains @target.
 */
static struct spu *
neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
{
	struct spu *spu;
	struct device_node *spu_dn;
	const phandle *vic_handles;
	int lenp, i;

	list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
		spu_dn = spu_devnode(spu);
		if (spu_dn == avoid)
			continue;
		vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
		for (i=0; i < (lenp / sizeof(phandle)); i++) {
			if (vic_handles[i] == target->phandle)
				return spu;
		}
	}
	return NULL;
}

/* Chain the SPUs of BE @cbe into an affinity list by following the
 * firmware-provided "vicinity" phandles, marking memory affinity when a
 * mic-tm neighbour is crossed.
 */
static void init_affinity_node(int cbe)
{
	struct spu *spu, *last_spu;
	struct device_node *vic_dn, *last_spu_dn;
	phandle avoid_ph;
	const phandle *vic_handles;
	const char *name;
	int lenp, i, added;

	last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
								cbe_list);
	avoid_ph = 0;
	for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
		last_spu_dn = spu_devnode(last_spu);
		vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);

		/*
		 * Walk through each phandle in vicinity property of the spu
		 * (tipically two vicinity phandles per spe node)
		 */
		for (i = 0; i < (lenp / sizeof(phandle)); i++) {
			if (vic_handles[i] == avoid_ph)
				continue;

			vic_dn = of_find_node_by_phandle(vic_handles[i]);
			if (!vic_dn)
				continue;

			/* a neighbour might be spe, mic-tm, or bif0 */
			name = of_get_property(vic_dn, "name", NULL);
			if (!name)
				continue;

			if (strcmp(name, "spe") == 0) {
				spu = devnode_spu(cbe, vic_dn);
				avoid_ph = last_spu_dn->phandle;
			} else {
				/*
				 * "mic-tm" and "bif0" nodes do not have
				 * vicinity property. So we need to find the
				 * spe which has vic_dn as neighbour, but
				 * skipping the one we came from (last_spu_dn)
				 */
				spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
				if (!spu)
					continue;
				if (!strcmp(name, "mic-tm")) {
					last_spu->has_mem_affinity = 1;
					spu->has_mem_affinity = 1;
				}
				avoid_ph = vic_dn->phandle;
			}

			list_add_tail(&spu->aff_list, &last_spu->aff_list);
			last_spu = spu;
			break;
		}
	}
}

static void init_affinity_fw(void)
{
	int cbe;

	for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
		init_affinity_node(cbe);
}

/* Top-level affinity setup: prefer firmware vicinity data, fall back to
 * the QS20 hardcoded layout on matching hardware.
 */
static int __init init_affinity(void)
{
	if (of_has_vicinity()) {
		init_affinity_fw();
	} else {
		long root = of_get_flat_dt_root();
		if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
			init_affinity_qs20_harcoded();
		else
			printk("No affinity configuration found\n");
	}

	return 0;
}

const struct spu_management_ops spu_management_of_ops = {
	.enumerate_spus = of_enumerate_spus,
	.create_spu = of_create_spu,
	.destroy_spu = of_destroy_spu,
	.enable_spu = enable_spu_by_master_run,
	.disable_spu = disable_spu_by_master_run,
	.init_affinity = init_affinity,
};
gpl-2.0
henrix/beagle-linux
arch/powerpc/platforms/cell/spu_manage.c
1963
13271
/* * spu management operations for of based platforms * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * Copyright 2006 Sony Corp. * (C) Copyright 2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/interrupt.h> #include <linux/list.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/device.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/firmware.h> #include <asm/prom.h> #include "spufs/spufs.h" #include "interrupt.h" struct device_node *spu_devnode(struct spu *spu) { return spu->devnode; } EXPORT_SYMBOL_GPL(spu_devnode); static u64 __init find_spu_unit_number(struct device_node *spe) { const unsigned int *prop; int proplen; /* new device trees should provide the physical-id attribute */ prop = of_get_property(spe, "physical-id", &proplen); if (proplen == 4) return (u64)*prop; /* celleb device tree provides the unit-id */ prop = of_get_property(spe, "unit-id", &proplen); if (proplen == 4) return (u64)*prop; /* legacy device trees provide the id in the reg attribute */ prop = of_get_property(spe, "reg", &proplen); if (proplen == 4) return (u64)*prop; return 0; } static void spu_unmap(struct spu *spu) { if (!firmware_has_feature(FW_FEATURE_LPAR)) iounmap(spu->priv1); iounmap(spu->priv2); 
iounmap(spu->problem); iounmap((__force u8 __iomem *)spu->local_store); } static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np) { unsigned int isrc; const u32 *tmp; int nid; /* Get the interrupt source unit from the device-tree */ tmp = of_get_property(np, "isrc", NULL); if (!tmp) return -ENODEV; isrc = tmp[0]; tmp = of_get_property(np->parent->parent, "node-id", NULL); if (!tmp) { printk(KERN_WARNING "%s: can't find node-id\n", __func__); nid = spu->node; } else nid = tmp[0]; /* Add the node number */ isrc |= nid << IIC_IRQ_NODE_SHIFT; /* Now map interrupts of all 3 classes */ spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc); spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc); spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc); /* Right now, we only fail if class 2 failed */ return spu->irqs[2] == NO_IRQ ? -EINVAL : 0; } static void __iomem * __init spu_map_prop_old(struct spu *spu, struct device_node *n, const char *name) { const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; int proplen; prop = of_get_property(n, name, &proplen); if (prop == NULL || proplen != sizeof (struct address_prop)) return NULL; return ioremap(prop->address, prop->len); } static int __init spu_map_device_old(struct spu *spu) { struct device_node *node = spu->devnode; const char *prop; int ret; ret = -ENODEV; spu->name = of_get_property(node, "name", NULL); if (!spu->name) goto out; prop = of_get_property(node, "local-store", NULL); if (!prop) goto out; spu->local_store_phys = *(unsigned long *)prop; /* we use local store as ram, not io memory */ spu->local_store = (void __force *) spu_map_prop_old(spu, node, "local-store"); if (!spu->local_store) goto out; prop = of_get_property(node, "problem", NULL); if (!prop) goto out_unmap; spu->problem_phys = *(unsigned long *)prop; spu->problem = spu_map_prop_old(spu, node, "problem"); if (!spu->problem) goto out_unmap; spu->priv2 = 
spu_map_prop_old(spu, node, "priv2"); if (!spu->priv2) goto out_unmap; if (!firmware_has_feature(FW_FEATURE_LPAR)) { spu->priv1 = spu_map_prop_old(spu, node, "priv1"); if (!spu->priv1) goto out_unmap; } ret = 0; goto out; out_unmap: spu_unmap(spu); out: return ret; } static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) { struct of_phandle_args oirq; int ret; int i; for (i=0; i < 3; i++) { ret = of_irq_parse_one(np, i, &oirq); if (ret) { pr_debug("spu_new: failed to get irq %d\n", i); goto err; } ret = -EINVAL; pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0], oirq.np->full_name); spu->irqs[i] = irq_create_of_mapping(&oirq); if (spu->irqs[i] == NO_IRQ) { pr_debug("spu_new: failed to map it !\n"); goto err; } } return 0; err: pr_debug("failed to map irq %x for spu %s\n", *oirq.args, spu->name); for (; i >= 0; i--) { if (spu->irqs[i] != NO_IRQ) irq_dispose_mapping(spu->irqs[i]); } return ret; } static int spu_map_resource(struct spu *spu, int nr, void __iomem** virt, unsigned long *phys) { struct device_node *np = spu->devnode; struct resource resource = { }; unsigned long len; int ret; ret = of_address_to_resource(np, nr, &resource); if (ret) return ret; if (phys) *phys = resource.start; len = resource_size(&resource); *virt = ioremap(resource.start, len); if (!*virt) return -EINVAL; return 0; } static int __init spu_map_device(struct spu *spu) { struct device_node *np = spu->devnode; int ret = -ENODEV; spu->name = of_get_property(np, "name", NULL); if (!spu->name) goto out; ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, &spu->local_store_phys); if (ret) { pr_debug("spu_new: failed to map %s resource 0\n", np->full_name); goto out; } ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, &spu->problem_phys); if (ret) { pr_debug("spu_new: failed to map %s resource 1\n", np->full_name); goto out_unmap; } ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); if (ret) { pr_debug("spu_new: failed to map %s 
resource 2\n", np->full_name); goto out_unmap; } if (!firmware_has_feature(FW_FEATURE_LPAR)) ret = spu_map_resource(spu, 3, (void __iomem**)&spu->priv1, NULL); if (ret) { pr_debug("spu_new: failed to map %s resource 3\n", np->full_name); goto out_unmap; } pr_debug("spu_new: %s maps:\n", np->full_name); pr_debug(" local store : 0x%016lx -> 0x%p\n", spu->local_store_phys, spu->local_store); pr_debug(" problem state : 0x%016lx -> 0x%p\n", spu->problem_phys, spu->problem); pr_debug(" priv2 : 0x%p\n", spu->priv2); pr_debug(" priv1 : 0x%p\n", spu->priv1); return 0; out_unmap: spu_unmap(spu); out: pr_debug("failed to map spe %s: %d\n", spu->name, ret); return ret; } static int __init of_enumerate_spus(int (*fn)(void *data)) { int ret; struct device_node *node; unsigned int n = 0; ret = -ENODEV; for (node = of_find_node_by_type(NULL, "spe"); node; node = of_find_node_by_type(node, "spe")) { ret = fn(node); if (ret) { printk(KERN_WARNING "%s: Error initializing %s\n", __func__, node->name); break; } n++; } return ret ? 
ret : n; } static int __init of_create_spu(struct spu *spu, void *data) { int ret; struct device_node *spe = (struct device_node *)data; static int legacy_map = 0, legacy_irq = 0; spu->devnode = of_node_get(spe); spu->spe_id = find_spu_unit_number(spe); spu->node = of_node_to_nid(spe); if (spu->node >= MAX_NUMNODES) { printk(KERN_WARNING "SPE %s on node %d ignored," " node number too big\n", spe->full_name, spu->node); printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); ret = -ENODEV; goto out; } ret = spu_map_device(spu); if (ret) { if (!legacy_map) { legacy_map = 1; printk(KERN_WARNING "%s: Legacy device tree found, " "trying to map old style\n", __func__); } ret = spu_map_device_old(spu); if (ret) { printk(KERN_ERR "Unable to map %s\n", spu->name); goto out; } } ret = spu_map_interrupts(spu, spe); if (ret) { if (!legacy_irq) { legacy_irq = 1; printk(KERN_WARNING "%s: Legacy device tree found, " "trying old style irq\n", __func__); } ret = spu_map_interrupts_old(spu, spe); if (ret) { printk(KERN_ERR "%s: could not map interrupts\n", spu->name); goto out_unmap; } } pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name, spu->local_store, spu->problem, spu->priv1, spu->priv2, spu->number); goto out; out_unmap: spu_unmap(spu); out: return ret; } static int of_destroy_spu(struct spu *spu) { spu_unmap(spu); of_node_put(spu->devnode); return 0; } static void enable_spu_by_master_run(struct spu_context *ctx) { ctx->ops->master_start(ctx); } static void disable_spu_by_master_run(struct spu_context *ctx) { ctx->ops->master_stop(ctx); } /* Hardcoded affinity idxs for qs20 */ #define QS20_SPES_PER_BE 8 static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 }; static struct spu *spu_lookup_reg(int node, u32 reg) { struct spu *spu; const u32 *spu_reg; list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { spu_reg = of_get_property(spu_devnode(spu), "reg", NULL); if (*spu_reg 
== reg) return spu; } return NULL; } static void init_affinity_qs20_harcoded(void) { int node, i; struct spu *last_spu, *spu; u32 reg; for (node = 0; node < MAX_NUMNODES; node++) { last_spu = NULL; for (i = 0; i < QS20_SPES_PER_BE; i++) { reg = qs20_reg_idxs[i]; spu = spu_lookup_reg(node, reg); if (!spu) continue; spu->has_mem_affinity = qs20_reg_memory[reg]; if (last_spu) list_add_tail(&spu->aff_list, &last_spu->aff_list); last_spu = spu; } } } static int of_has_vicinity(void) { struct device_node *dn; for_each_node_by_type(dn, "spe") { if (of_find_property(dn, "vicinity", NULL)) { of_node_put(dn); return 1; } } return 0; } static struct spu *devnode_spu(int cbe, struct device_node *dn) { struct spu *spu; list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) if (spu_devnode(spu) == dn) return spu; return NULL; } static struct spu * neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid) { struct spu *spu; struct device_node *spu_dn; const phandle *vic_handles; int lenp, i; list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) { spu_dn = spu_devnode(spu); if (spu_dn == avoid) continue; vic_handles = of_get_property(spu_dn, "vicinity", &lenp); for (i=0; i < (lenp / sizeof(phandle)); i++) { if (vic_handles[i] == target->phandle) return spu; } } return NULL; } static void init_affinity_node(int cbe) { struct spu *spu, *last_spu; struct device_node *vic_dn, *last_spu_dn; phandle avoid_ph; const phandle *vic_handles; const char *name; int lenp, i, added; last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu, cbe_list); avoid_ph = 0; for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) { last_spu_dn = spu_devnode(last_spu); vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp); /* * Walk through each phandle in vicinity property of the spu * (tipically two vicinity phandles per spe node) */ for (i = 0; i < (lenp / sizeof(phandle)); i++) { if (vic_handles[i] == avoid_ph) continue; vic_dn = 
of_find_node_by_phandle(vic_handles[i]); if (!vic_dn) continue; /* a neighbour might be spe, mic-tm, or bif0 */ name = of_get_property(vic_dn, "name", NULL); if (!name) continue; if (strcmp(name, "spe") == 0) { spu = devnode_spu(cbe, vic_dn); avoid_ph = last_spu_dn->phandle; } else { /* * "mic-tm" and "bif0" nodes do not have * vicinity property. So we need to find the * spe which has vic_dn as neighbour, but * skipping the one we came from (last_spu_dn) */ spu = neighbour_spu(cbe, vic_dn, last_spu_dn); if (!spu) continue; if (!strcmp(name, "mic-tm")) { last_spu->has_mem_affinity = 1; spu->has_mem_affinity = 1; } avoid_ph = vic_dn->phandle; } list_add_tail(&spu->aff_list, &last_spu->aff_list); last_spu = spu; break; } } } static void init_affinity_fw(void) { int cbe; for (cbe = 0; cbe < MAX_NUMNODES; cbe++) init_affinity_node(cbe); } static int __init init_affinity(void) { if (of_has_vicinity()) { init_affinity_fw(); } else { long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0")) init_affinity_qs20_harcoded(); else printk("No affinity configuration found\n"); } return 0; } const struct spu_management_ops spu_management_of_ops = { .enumerate_spus = of_enumerate_spus, .create_spu = of_create_spu, .destroy_spu = of_destroy_spu, .enable_spu = enable_spu_by_master_run, .disable_spu = disable_spu_by_master_run, .init_affinity = init_affinity, };
gpl-2.0
DevChun/ace-ics-kernel
arch/arm/mach-sa1100/jornada720.c
2475
12380
/* * linux/arch/arm/mach-sa1100/jornada720.c * * HP Jornada720 init code * * Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com> * Copyright (C) 2006 Filip Zyzniewski <filip.zyzniewski@tefnet.pl> * Copyright (C) 2005 Michael Gernoth <michael@gernoth.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/tty.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <video/s1d13xxxfb.h> #include <mach/hardware.h> #include <asm/hardware/sa1111.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <asm/setup.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/map.h> #include <asm/mach/serial_sa1100.h> #include "generic.h" /* * HP Documentation referred in this file: * http://www.jlime.com/downloads/development/docs/jornada7xx/jornada720.txt */ /* line 110 of HP's doc */ #define TUCR_VAL 0x20000400 /* memory space (line 52 of HP's doc) */ #define SA1111REGSTART 0x40000000 #define SA1111REGLEN 0x00001fff #define EPSONREGSTART 0x48000000 #define EPSONREGLEN 0x00100000 #define EPSONFBSTART 0x48200000 /* 512kB framebuffer */ #define EPSONFBLEN 512*1024 static struct s1d13xxxfb_regval s1d13xxxfb_initregs[] = { /* line 344 of HP's doc */ {0x0001,0x00}, // Miscellaneous Register {0x01FC,0x00}, // Display Mode Register {0x0004,0x00}, // General IO Pins Configuration Register 0 {0x0005,0x00}, // General IO Pins Configuration Register 1 {0x0008,0x00}, // General IO Pins Control Register 0 {0x0009,0x00}, // General IO Pins Control Register 1 {0x0010,0x01}, // Memory Clock Configuration Register {0x0014,0x11}, // LCD Pixel Clock Configuration Register {0x0018,0x01}, // CRT/TV Pixel Clock Configuration Register {0x001C,0x01}, // 
MediaPlug Clock Configuration Register {0x001E,0x01}, // CPU To Memory Wait State Select Register {0x0020,0x00}, // Memory Configuration Register {0x0021,0x45}, // DRAM Refresh Rate Register {0x002A,0x01}, // DRAM Timings Control Register 0 {0x002B,0x03}, // DRAM Timings Control Register 1 {0x0030,0x1c}, // Panel Type Register {0x0031,0x00}, // MOD Rate Register {0x0032,0x4F}, // LCD Horizontal Display Width Register {0x0034,0x07}, // LCD Horizontal Non-Display Period Register {0x0035,0x01}, // TFT FPLINE Start Position Register {0x0036,0x0B}, // TFT FPLINE Pulse Width Register {0x0038,0xEF}, // LCD Vertical Display Height Register 0 {0x0039,0x00}, // LCD Vertical Display Height Register 1 {0x003A,0x13}, // LCD Vertical Non-Display Period Register {0x003B,0x0B}, // TFT FPFRAME Start Position Register {0x003C,0x01}, // TFT FPFRAME Pulse Width Register {0x0040,0x05}, // LCD Display Mode Register (2:4bpp,3:8bpp,5:16bpp) {0x0041,0x00}, // LCD Miscellaneous Register {0x0042,0x00}, // LCD Display Start Address Register 0 {0x0043,0x00}, // LCD Display Start Address Register 1 {0x0044,0x00}, // LCD Display Start Address Register 2 {0x0046,0x80}, // LCD Memory Address Offset Register 0 {0x0047,0x02}, // LCD Memory Address Offset Register 1 {0x0048,0x00}, // LCD Pixel Panning Register {0x004A,0x00}, // LCD Display FIFO High Threshold Control Register {0x004B,0x00}, // LCD Display FIFO Low Threshold Control Register {0x0050,0x4F}, // CRT/TV Horizontal Display Width Register {0x0052,0x13}, // CRT/TV Horizontal Non-Display Period Register {0x0053,0x01}, // CRT/TV HRTC Start Position Register {0x0054,0x0B}, // CRT/TV HRTC Pulse Width Register {0x0056,0xDF}, // CRT/TV Vertical Display Height Register 0 {0x0057,0x01}, // CRT/TV Vertical Display Height Register 1 {0x0058,0x2B}, // CRT/TV Vertical Non-Display Period Register {0x0059,0x09}, // CRT/TV VRTC Start Position Register {0x005A,0x01}, // CRT/TV VRTC Pulse Width Register {0x005B,0x10}, // TV Output Control Register 
{0x0060,0x03}, // CRT/TV Display Mode Register (2:4bpp,3:8bpp,5:16bpp) {0x0062,0x00}, // CRT/TV Display Start Address Register 0 {0x0063,0x00}, // CRT/TV Display Start Address Register 1 {0x0064,0x00}, // CRT/TV Display Start Address Register 2 {0x0066,0x40}, // CRT/TV Memory Address Offset Register 0 {0x0067,0x01}, // CRT/TV Memory Address Offset Register 1 {0x0068,0x00}, // CRT/TV Pixel Panning Register {0x006A,0x00}, // CRT/TV Display FIFO High Threshold Control Register {0x006B,0x00}, // CRT/TV Display FIFO Low Threshold Control Register {0x0070,0x00}, // LCD Ink/Cursor Control Register {0x0071,0x01}, // LCD Ink/Cursor Start Address Register {0x0072,0x00}, // LCD Cursor X Position Register 0 {0x0073,0x00}, // LCD Cursor X Position Register 1 {0x0074,0x00}, // LCD Cursor Y Position Register 0 {0x0075,0x00}, // LCD Cursor Y Position Register 1 {0x0076,0x00}, // LCD Ink/Cursor Blue Color 0 Register {0x0077,0x00}, // LCD Ink/Cursor Green Color 0 Register {0x0078,0x00}, // LCD Ink/Cursor Red Color 0 Register {0x007A,0x1F}, // LCD Ink/Cursor Blue Color 1 Register {0x007B,0x3F}, // LCD Ink/Cursor Green Color 1 Register {0x007C,0x1F}, // LCD Ink/Cursor Red Color 1 Register {0x007E,0x00}, // LCD Ink/Cursor FIFO Threshold Register {0x0080,0x00}, // CRT/TV Ink/Cursor Control Register {0x0081,0x01}, // CRT/TV Ink/Cursor Start Address Register {0x0082,0x00}, // CRT/TV Cursor X Position Register 0 {0x0083,0x00}, // CRT/TV Cursor X Position Register 1 {0x0084,0x00}, // CRT/TV Cursor Y Position Register 0 {0x0085,0x00}, // CRT/TV Cursor Y Position Register 1 {0x0086,0x00}, // CRT/TV Ink/Cursor Blue Color 0 Register {0x0087,0x00}, // CRT/TV Ink/Cursor Green Color 0 Register {0x0088,0x00}, // CRT/TV Ink/Cursor Red Color 0 Register {0x008A,0x1F}, // CRT/TV Ink/Cursor Blue Color 1 Register {0x008B,0x3F}, // CRT/TV Ink/Cursor Green Color 1 Register {0x008C,0x1F}, // CRT/TV Ink/Cursor Red Color 1 Register {0x008E,0x00}, // CRT/TV Ink/Cursor FIFO Threshold Register {0x0100,0x00}, // 
BitBlt Control Register 0 {0x0101,0x00}, // BitBlt Control Register 1 {0x0102,0x00}, // BitBlt ROP Code/Color Expansion Register {0x0103,0x00}, // BitBlt Operation Register {0x0104,0x00}, // BitBlt Source Start Address Register 0 {0x0105,0x00}, // BitBlt Source Start Address Register 1 {0x0106,0x00}, // BitBlt Source Start Address Register 2 {0x0108,0x00}, // BitBlt Destination Start Address Register 0 {0x0109,0x00}, // BitBlt Destination Start Address Register 1 {0x010A,0x00}, // BitBlt Destination Start Address Register 2 {0x010C,0x00}, // BitBlt Memory Address Offset Register 0 {0x010D,0x00}, // BitBlt Memory Address Offset Register 1 {0x0110,0x00}, // BitBlt Width Register 0 {0x0111,0x00}, // BitBlt Width Register 1 {0x0112,0x00}, // BitBlt Height Register 0 {0x0113,0x00}, // BitBlt Height Register 1 {0x0114,0x00}, // BitBlt Background Color Register 0 {0x0115,0x00}, // BitBlt Background Color Register 1 {0x0118,0x00}, // BitBlt Foreground Color Register 0 {0x0119,0x00}, // BitBlt Foreground Color Register 1 {0x01E0,0x00}, // Look-Up Table Mode Register {0x01E2,0x00}, // Look-Up Table Address Register /* not sure, wouldn't like to mess with the driver */ {0x01E4,0x00}, // Look-Up Table Data Register /* jornada doc says 0x00, but I trust the driver */ {0x01F0,0x10}, // Power Save Configuration Register {0x01F1,0x00}, // Power Save Status Register {0x01F4,0x00}, // CPU-to-Memory Access Watchdog Timer Register {0x01FC,0x01}, // Display Mode Register(0x01:LCD, 0x02:CRT, 0x03:LCD&CRT) }; static struct s1d13xxxfb_pdata s1d13xxxfb_data = { .initregs = s1d13xxxfb_initregs, .initregssize = ARRAY_SIZE(s1d13xxxfb_initregs), .platform_init_video = NULL }; static struct resource s1d13xxxfb_resources[] = { [0] = { .start = EPSONFBSTART, .end = EPSONFBSTART + EPSONFBLEN, .flags = IORESOURCE_MEM, }, [1] = { .start = EPSONREGSTART, .end = EPSONREGSTART + EPSONREGLEN, .flags = IORESOURCE_MEM, } }; static struct platform_device s1d13xxxfb_device = { .name = S1D_DEVICENAME, .id = 
0, .dev = { .platform_data = &s1d13xxxfb_data, }, .num_resources = ARRAY_SIZE(s1d13xxxfb_resources), .resource = s1d13xxxfb_resources, }; static struct resource sa1111_resources[] = { [0] = { .start = SA1111REGSTART, .end = SA1111REGSTART + SA1111REGLEN, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_GPIO1, .end = IRQ_GPIO1, .flags = IORESOURCE_IRQ, }, }; static struct sa1111_platform_data sa1111_info = { .irq_base = IRQ_BOARD_END, }; static u64 sa1111_dmamask = 0xffffffffUL; static struct platform_device sa1111_device = { .name = "sa1111", .id = 0, .dev = { .dma_mask = &sa1111_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &sa1111_info, }, .num_resources = ARRAY_SIZE(sa1111_resources), .resource = sa1111_resources, }; static struct platform_device jornada_ssp_device = { .name = "jornada_ssp", .id = -1, }; static struct platform_device jornada_kbd_device = { .name = "jornada720_kbd", .id = -1, }; static struct platform_device jornada_ts_device = { .name = "jornada_ts", .id = -1, }; static struct platform_device *devices[] __initdata = { &sa1111_device, &jornada_ssp_device, &s1d13xxxfb_device, &jornada_kbd_device, &jornada_ts_device, }; static int __init jornada720_init(void) { int ret = -ENODEV; if (machine_is_jornada720()) { /* we want to use gpio20 as input to drive the clock of our uart 3 */ GPDR |= GPIO_GPIO20; /* Clear gpio20 pin as input */ TUCR = TUCR_VAL; GPSR = GPIO_GPIO20; /* start gpio20 pin */ udelay(1); GPCR = GPIO_GPIO20; /* stop gpio20 */ udelay(1); GPSR = GPIO_GPIO20; /* restart gpio20 */ udelay(20); /* give it some time to restart */ ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } return ret; } arch_initcall(jornada720_init); static struct map_desc jornada720_io_desc[] __initdata = { { /* Epson registers */ .virtual = 0xf0000000, .pfn = __phys_to_pfn(EPSONREGSTART), .length = EPSONREGLEN, .type = MT_DEVICE }, { /* Epson frame buffer */ .virtual = 0xf1000000, .pfn = __phys_to_pfn(EPSONFBSTART), .length = EPSONFBLEN, .type = 
MT_DEVICE }, { /* SA-1111 */ .virtual = 0xf4000000, .pfn = __phys_to_pfn(SA1111REGSTART), .length = SA1111REGLEN, .type = MT_DEVICE } }; static void __init jornada720_map_io(void) { sa1100_map_io(); iotable_init(jornada720_io_desc, ARRAY_SIZE(jornada720_io_desc)); sa1100_register_uart(0, 3); sa1100_register_uart(1, 1); } static struct mtd_partition jornada720_partitions[] = { { .name = "JORNADA720 boot firmware", .size = 0x00040000, .offset = 0, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "JORNADA720 kernel", .size = 0x000c0000, .offset = 0x00040000, }, { .name = "JORNADA720 params", .size = 0x00040000, .offset = 0x00100000, }, { .name = "JORNADA720 initrd", .size = 0x00100000, .offset = 0x00140000, }, { .name = "JORNADA720 root cramfs", .size = 0x00300000, .offset = 0x00240000, }, { .name = "JORNADA720 usr cramfs", .size = 0x00800000, .offset = 0x00540000, }, { .name = "JORNADA720 usr local", .size = 0, /* will expand to the end of the flash */ .offset = 0x00d00000, } }; static void jornada720_set_vpp(int vpp) { if (vpp) /* enabling flash write (line 470 of HP's doc) */ PPSR |= PPC_LDD7; else /* disabling flash write (line 470 of HP's doc) */ PPSR &= ~PPC_LDD7; PPDR |= PPC_LDD7; } static struct flash_platform_data jornada720_flash_data = { .map_name = "cfi_probe", .set_vpp = jornada720_set_vpp, .parts = jornada720_partitions, .nr_parts = ARRAY_SIZE(jornada720_partitions), }; static struct resource jornada720_flash_resource = { .start = SA1100_CS0_PHYS, .end = SA1100_CS0_PHYS + SZ_32M - 1, .flags = IORESOURCE_MEM, }; static void __init jornada720_mach_init(void) { sa11x0_register_mtd(&jornada720_flash_data, &jornada720_flash_resource, 1); } MACHINE_START(JORNADA720, "HP Jornada 720") /* Maintainer: Kristoffer Ericson <Kristoffer.Ericson@gmail.com> */ .boot_params = 0xc0000100, .map_io = jornada720_map_io, .init_irq = sa1100_init_irq, .timer = &sa1100_timer, .init_machine = jornada720_mach_init, MACHINE_END
gpl-2.0
badbear1727/E120LKERNEL
arch/um/drivers/net_kern.c
2731
20224
/* * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and * James Leu (jleu@mindspring.net). * Copyright (C) 2001 by various other people who didn't put their name here. * Licensed under the GPL. */ #include <linux/bootmem.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "init.h" #include "irq_kern.h" #include "irq_user.h" #include "mconsole_kern.h" #include "net_kern.h" #include "net_user.h" #define DRIVER_NAME "uml-netdev" static DEFINE_SPINLOCK(opened_lock); static LIST_HEAD(opened); /* * The drop_skb is used when we can't allocate an skb. The * packet is read into drop_skb in order to get the data off the * connection to the host. * It is reallocated whenever a maximum packet size is seen which is * larger than any seen before. update_drop_skb is called from * eth_configure when a new interface is added. */ static DEFINE_SPINLOCK(drop_lock); static struct sk_buff *drop_skb; static int drop_max; static int update_drop_skb(int max) { struct sk_buff *new; unsigned long flags; int err = 0; spin_lock_irqsave(&drop_lock, flags); if (max <= drop_max) goto out; err = -ENOMEM; new = dev_alloc_skb(max); if (new == NULL) goto out; skb_put(new, max); kfree_skb(drop_skb); drop_skb = new; drop_max = max; err = 0; out: spin_unlock_irqrestore(&drop_lock, flags); return err; } static int uml_net_rx(struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); int pkt_len; struct sk_buff *skb; /* If we can't allocate memory, try again next round. */ skb = dev_alloc_skb(lp->max_packet); if (skb == NULL) { drop_skb->dev = dev; /* Read a packet into drop_skb and don't do anything with it. 
*/ (*lp->read)(lp->fd, drop_skb, lp); dev->stats.rx_dropped++; return 0; } skb->dev = dev; skb_put(skb, lp->max_packet); skb_reset_mac_header(skb); pkt_len = (*lp->read)(lp->fd, skb, lp); if (pkt_len > 0) { skb_trim(skb, pkt_len); skb->protocol = (*lp->protocol)(skb); dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; netif_rx(skb); return pkt_len; } kfree_skb(skb); return pkt_len; } static void uml_dev_close(struct work_struct *work) { struct uml_net_private *lp = container_of(work, struct uml_net_private, work); dev_close(lp->dev); } static irqreturn_t uml_net_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct uml_net_private *lp = netdev_priv(dev); int err; if (!netif_running(dev)) return IRQ_NONE; spin_lock(&lp->lock); while ((err = uml_net_rx(dev)) > 0) ; if (err < 0) { printk(KERN_ERR "Device '%s' read returned %d, shutting it down\n", dev->name, err); /* dev_close can't be called in interrupt context, and takes * again lp->lock. * And dev_close() can be safely called multiple times on the * same device, since it tests for (dev->flags & IFF_UP). So * there's no harm in delaying the device shutdown. * Furthermore, the workqueue will not re-enqueue an already * enqueued work item. */ schedule_work(&lp->work); goto out; } reactivate_fd(lp->fd, UM_ETH_IRQ); out: spin_unlock(&lp->lock); return IRQ_HANDLED; } static int uml_net_open(struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); int err; if (lp->fd >= 0) { err = -ENXIO; goto out; } lp->fd = (*lp->open)(&lp->user); if (lp->fd < 0) { err = lp->fd; goto out; } err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev); if (err != 0) { printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err); err = -ENETUNREACH; goto out_close; } lp->tl.data = (unsigned long) &lp->user; netif_start_queue(dev); /* clear buffer - it can happen that the host side of the interface * is full when we get here. 
In this case, new data is never queued, * SIGIOs never arrive, and the net never works. */ while ((err = uml_net_rx(dev)) > 0) ; spin_lock(&opened_lock); list_add(&lp->list, &opened); spin_unlock(&opened_lock); return 0; out_close: if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user); lp->fd = -1; out: return err; } static int uml_net_close(struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); netif_stop_queue(dev); free_irq(dev->irq, dev); if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user); lp->fd = -1; spin_lock(&opened_lock); list_del(&lp->list); spin_unlock(&opened_lock); return 0; } static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct uml_net_private *lp = netdev_priv(dev); unsigned long flags; int len; netif_stop_queue(dev); spin_lock_irqsave(&lp->lock, flags); len = (*lp->write)(lp->fd, skb, lp); if (len == skb->len) { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; dev->trans_start = jiffies; netif_start_queue(dev); /* this is normally done in the interrupt when tx finishes */ netif_wake_queue(dev); } else if (len == 0) { netif_start_queue(dev); dev->stats.tx_dropped++; } else { netif_start_queue(dev); printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len); } spin_unlock_irqrestore(&lp->lock, flags); dev_kfree_skb(skb); return NETDEV_TX_OK; } static void uml_net_set_multicast_list(struct net_device *dev) { return; } static void uml_net_tx_timeout(struct net_device *dev) { dev->trans_start = jiffies; netif_wake_queue(dev); } static int uml_net_change_mtu(struct net_device *dev, int new_mtu) { dev->mtu = new_mtu; return 0; } static void uml_net_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, DRIVER_NAME); strcpy(info->version, "42"); } static const struct ethtool_ops uml_net_ethtool_ops = { .get_drvinfo = uml_net_get_drvinfo, .get_link = ethtool_op_get_link, }; static void uml_net_user_timer_expire(unsigned long _conn) { #ifdef undef struct connection 
*conn = (struct connection *)_conn;

	dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
	do_connect(conn);
#endif
}

/*
 * Parse "str" as a colon/comma-separated MAC address into "addr".
 * Rejects multicast, invalid, and (with a warning) globally-unique
 * addresses; on any parse failure falls back to a random locally
 * administered address.
 */
static void setup_etheraddr(char *str, unsigned char *addr, char *name)
{
	char *end;
	int i;

	if (str == NULL)
		goto random;

	for (i = 0; i < 6; i++) {
		addr[i] = simple_strtoul(str, &end, 16);
		if ((end == str) ||
		    ((*end != ':') && (*end != ',') && (*end != '\0'))) {
			printk(KERN_ERR "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			goto random;
		}
		str = end + 1;
	}
	if (is_multicast_ether_addr(addr)) {
		printk(KERN_ERR "Attempt to assign a multicast ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_valid_ether_addr(addr)) {
		printk(KERN_ERR "Attempt to assign an invalid ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_local_ether_addr(addr)) {
		/* Globally valid MACs are allowed, but warned about. */
		printk(KERN_WARNING "Warning: Assigning a globally valid ethernet "
		       "address to a device\n");
		printk(KERN_WARNING "You should set the 2nd rightmost bit in "
		       "the first byte of the MAC,\n");
		printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4], addr[5]);
	}
	return;

random:
	printk(KERN_INFO
	       "Choosing a random ethernet address for device %s\n", name);
	random_ether_addr(addr);
}

/* Registry of configured devices, protected by devices_lock. */
static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};

/*
 * Platform device release callback: run the transport remove hook and
 * free both the uml_net wrapper and the net_device.
 */
static void net_device_release(struct device *dev)
{
	struct uml_net *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;
	struct uml_net_private *lp = netdev_priv(netdev);

	if (lp->remove != NULL)
		(*lp->remove)(&lp->user);
	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}

static const struct net_device_ops uml_netdev_ops = {
	.ndo_open		= uml_net_open,
	.ndo_stop		= uml_net_close,
	.ndo_start_xmit		= uml_net_start_xmit,
	.ndo_set_multicast_list	= uml_net_set_multicast_list,
	.ndo_tx_timeout		= uml_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= uml_net_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Ensures that platform_driver_register is called only once by
 * eth_configure.  Will be set in an initcall.
 */
static int driver_registered;

/*
 * Create and register ethN for the given transport.  Allocates the
 * uml_net wrapper and the etherdev (with transport private data),
 * registers the platform device, fills in the private state, runs the
 * transport's user init hook, and finally registers the netdevice.
 * Errors unwind through the labelled cleanup paths.
 */
static void eth_configure(int n, void *init, char *mac,
			  struct transport *transport)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int err, size;

	size = transport->private_size + sizeof(struct uml_net_private);

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
		       "uml_net\n");
		return;
	}

	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
		       "net_device for eth%d\n", n);
		goto out_free_device;
	}

	INIT_LIST_HEAD(&device->list);
	device->index = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
*/
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);

	setup_etheraddr(mac, device->mac, dev->name);

	printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac);

	lp = netdev_priv(dev);

	/* This points to the transport private data. It's still clear, but we
	 * must memset it to 0 *now*. Let's help the drivers. */
	memset(lp, 0, size);
	INIT_WORK(&lp->work, uml_dev_close);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = net_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev,&device->pdev.dev);

	device->dev = dev;

	/*
	 * These just fill in a data structure, so there's no failure
	 * to be worried about.
	 */
	(*transport->kern->init)(dev, init);

	/* Populate the private state from the transport's hook tables. */
	*lp = ((struct uml_net_private)
		{ .list			= LIST_HEAD_INIT(lp->list),
		  .dev			= dev,
		  .fd			= -1,
		  .mac			= { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .max_packet		= transport->user->max_packet,
		  .protocol		= transport->kern->protocol,
		  .open			= transport->user->open,
		  .close		= transport->user->close,
		  .remove		= transport->user->remove,
		  .read			= transport->kern->read,
		  .write		= transport->kern->write,
		  .add_address		= transport->user->add_address,
		  .delete_address	= transport->user->delete_address });

	init_timer(&lp->tl);
	spin_lock_init(&lp->lock);
	lp->tl.function = uml_net_user_timer_expire;
	memcpy(lp->mac, device->mac, sizeof(lp->mac));

	if ((transport->user->init != NULL) &&
	    ((*transport->user->init)(&lp->user, dev) != 0))
		goto out_unregister;

	/* don't use eth_mac_addr, it will not work here */
	memcpy(dev->dev_addr, device->mac, ETH_ALEN);
	dev->mtu = transport->user->mtu;
	dev->netdev_ops = &uml_netdev_ops;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	err = update_drop_skb(lp->max_packet);
	if (err)
		goto out_undo_user_init;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	/* Success: publish the device on the global list. */
	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	return;

out_undo_user_init:
	if (transport->user->remove != NULL)
		(*transport->user->remove)(&lp->user);
out_unregister:
	platform_device_unregister(&device->pdev);
	return; /* platform_device_unregister frees dev and device */
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}

/* Look up the uml_net with index n on the devices list, or NULL. */
static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices) {
		device = list_entry(ele, struct uml_net, list);
		if (device->index == n)
			goto out;
	}
	device = NULL;
out:
	spin_unlock(&devices_lock);
	return device;
}

/*
 * Parse "N=rest" from str: returns the device number in *index_out and
 * the text after '=' in *str_out.  Fails (with a message in *error_out)
 * on bad syntax or if device N is already configured.
 */
static int eth_parse(char *str, int *index_out, char **str_out,
		     char **error_out)
{
	char *end;
	int n, err = -EINVAL;

	n = simple_strtoul(str, &end, 0);
	if (end == str) {
		*error_out = "Bad device number";
		return err;
	}

	str = end;
	if (*str != '=') {
		*error_out = "Expected '=' after device number";
		return err;
	}
	str++;

	if (find_device(n)) {
		*error_out = "Device already configured";
		return err;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}

/* One queued "eth=" command-line entry awaiting its transport. */
struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);

/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);

/*
 * Check whether command-line spec "eth" names this transport.  Returns
 * 0 if the name does not match; returns 1 on a match, with *init_out
 * set to the transport's parsed setup data (or NULL if allocation or
 * the transport's own setup parsing failed).
 */
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out)
{
	int len;

	len = strlen(transport->name);
	if (strncmp(eth, transport->name, len))
		return 0;

	eth += len;
	if (*eth == ',')
		eth++;
	else if (*eth != '\0')
		return 0;

	*init_out = kmalloc(transport->setup_size, GFP_KERNEL);
	if (*init_out == NULL)
		return 1;

	if (!transport->setup(eth, mac_out, *init_out)) {
		kfree(*init_out);
		*init_out = NULL;
	}
	return 1;
}

/*
 * Register a transport and configure any queued command-line devices
 * that name it.
 */
void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	spin_lock(&transports_lock);
	BUG_ON(!list_empty(&new->list));
	list_add(&new->list, &transports);
	spin_unlock(&transports_lock);

	/* Configure any eth= entries queued before this transport existed. */
	list_for_each_safe(ele, next, &eth_cmd_line) {
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init, &mac);
		if (!match)
			continue;
		else if (init != NULL) {
			eth_configure(eth->index, init, mac, new);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

/*
 * Try to configure device "index" from spec "str" against every known
 * transport; returns 1 if some transport claimed the spec, else 0.
 */
static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;
	int found = 0;

	spin_lock(&transports_lock);
	list_for_each(ele, &transports) {
		transport = list_entry(ele, struct transport, list);
		if (!check_transport(transport, str, index, &init, &mac))
			continue;
		if (init != NULL) {
			eth_configure(index, init, mac, transport);
			kfree(init);
		}
		found = 1;
		break;
	}

	spin_unlock(&transports_lock);
	return found;
}

/*
 * "eth=" kernel command-line handler: parse the spec and queue it on
 * eth_cmd_line for the matching transport to pick up later.
 */
static int __init eth_setup(char *str)
{
	struct eth_init *new;
	char *error;
	int n, err;

	err = eth_parse(str, &n, &str, &error);
	if (err) {
		printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
		       str, error);
		return 1;
	}

	new = alloc_bootmem(sizeof(*new));
	if (new == NULL) {
		printk(KERN_ERR "eth_init : alloc_bootmem failed\n");
		return 1;
	}

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return 1;
}

__setup("eth", eth_setup);

__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
" Configure a network device.\n\n"
);

/* mconsole "config eth" handler - configure a device at runtime. */
static int net_config(char *str, char **error_out)
{
	int n, err;

	err = eth_parse(str, &n, &str, error_out);
	if (err)
		return err;

	/* This string is broken up and the pieces used by the underlying
	 * driver. So, it is freed only if eth_setup_common fails.
*/
	str = kstrdup(str, GFP_KERNEL);
	if (str == NULL) {
		*error_out = "net_config failed to strdup string";
		return -ENOMEM;
	}
	err = !eth_setup_common(str, n);
	if (err)
		kfree(str);
	return err;
}

/*
 * mconsole id handler: parse a single device number; a trailing
 * non-digit is an error (-1).
 */
static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

/*
 * mconsole "remove eth" handler: refuses while the device is open
 * (fd in use), then unregisters netdev and platform device.
 * NOTE(review): the busy test is "lp->fd > 0", which would miss a
 * legitimately open fd of 0 - confirm fd 0 can never be a transport fd.
 */
static int net_remove(int n, char **error_out)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if (device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = netdev_priv(dev);
	if (lp->fd > 0)
		return -EBUSY;
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	return 0;
}

static struct mc_device net_mc = {
	.list		= LIST_HEAD_INIT(net_mc.list),
	.name		= "eth",
	.config		= net_config,
	.get_config	= NULL,
	.id		= net_id,
	.remove		= net_remove,
};

#ifdef CONFIG_INET
/*
 * inetaddr notifier: forward NETDEV_UP/DOWN address changes on our own
 * devices to the transport's add_address/delete_address hooks.
 */
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	/* Only act on devices owned by this driver. */
	if (dev->netdev_ops->ndo_open != uml_net_open)
		return NOTIFY_DONE;

	lp = netdev_priv(dev);

	proc = NULL;
	switch (event) {
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if (proc != NULL) {
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return NOTIFY_DONE;
}

/* uml_net_init shouldn't be called twice on two CPUs at the same time */
static struct notifier_block uml_inetaddr_notifier = {
	.notifier_call		= uml_inetaddr_event,
};

static void inet_register(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/*
Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them. This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if (ip == NULL)
			continue;
		in = ip->ifa_list;
		while (in != NULL) {
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}
	spin_unlock(&opened_lock);
}
#else
static inline void inet_register(void)
{
}
#endif

/* Module init: register the mconsole device and the inetaddr notifier. */
static int uml_net_init(void)
{
	mconsole_register_dev(&net_mc);
	inet_register();
	return 0;
}

__initcall(uml_net_init);

/*
 * Exit hook: close every still-open device (free IRQ, close and remove
 * via transport hooks).
 */
static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		free_irq(lp->dev->irq, lp->dev);
		if ((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if (lp->remove != NULL)
			(*lp->remove)(&lp->user);
	}
	spin_unlock(&opened_lock);
}

__uml_exitcall(close_devices);

/*
 * Invoke cb(address, netmask, arg) for each IPv4 address configured on
 * the net_device passed as "d".
 */
void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
					void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if (ip == NULL)
		return;
	in = ip->ifa_list;
	while (in != NULL) {
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

/*
 * Copy the first IPv4 netmask of net_device "d" into *m.
 * Returns 0 on success, 1 if the device has no IPv4 configuration.
 */
int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__be32 *mask_out = m;

	if (ip == NULL)
		return 1;

	in = ip->ifa_list;
	if (in == NULL)
		return 1;

	*mask_out = in->ifa_mask;
	return 0;
}

/* Allocate one page as a scratch buffer; *len_out is its usable size. */
void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if (ret)
		*len_out = PAGE_SIZE;
	else
		*len_out = 0;
	return ret;
}

/* Release a buffer obtained from get_output_buffer(). */
void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer,
0);
}

/*
 * Shared command-line parser for the tap-style transports: split the
 * spec into device name, MAC, and gateway address.  Returns 0 on
 * success, 1 if unparsed text remains.
 */
int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if (remain != NULL) {
		printk(KERN_ERR "tap_setup_common - Extra garbage on "
		       "specification : '%s'\n", remain);
		return 1;
	}

	return 0;
}

/* Determine the protocol of a received frame via eth_type_trans(). */
unsigned short eth_protocol(struct sk_buff *skb)
{
	return eth_type_trans(skb, skb->dev);
}
gpl-2.0
SchulerControl/linux
arch/blackfin/mach-bf527/boards/ezbrd.c
2731
21188
/*
 * Copyright 2004-2009 Analog Devices Inc.
 * 2005 National ICT Australia (NICTA)
 * Aidan Williams <aidan@nicta.com.au>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/musb.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/reboot.h>
#include <asm/nand.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
#include <linux/spi/ad7877.h>

/*
 * Name the Board for the /proc/cpuinfo
 */
const char bfin_board_name[] = "ADI BF526-EZBRD";

/*
 * Driver needs to know address, irq and flag pin.
 */
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
/* MUSB controller register window and its general/DMA interrupts. */
static struct resource musb_resources[] = {
	[0] = {
		.start = 0xffc03800,
		.end = 0xffc03cff,
		.flags = IORESOURCE_MEM,
	},
	[1] = {	/* general IRQ */
		.start = IRQ_USB_INT0,
		.end = IRQ_USB_INT0,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
		.name = "mc"
	},
	[2] = {	/* DMA IRQ */
		.start = IRQ_USB_DMA,
		.end = IRQ_USB_DMA,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
		.name = "dma"
	},
};

static struct musb_hdrc_config musb_config = {
	.multipoint	= 0,
	.dyn_fifo	= 0,
	.soft_con	= 1,
	.dma		= 1,
	.num_eps	= 8,
	.dma_channels	= 8,
	.gpio_vrsel	= GPIO_PG13,
	/* Some custom boards need to be active low, just set it to "0"
	 * if it is the case.
	 */
	.gpio_vrsel_active	= 1,
	.clkin		= 24,		/* musb CLKIN in MHZ */
};

/* USB role selected by the kernel config (OTG / host / peripheral). */
static struct musb_hdrc_platform_data musb_plat = {
#if defined(CONFIG_USB_MUSB_OTG)
	.mode		= MUSB_OTG,
#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
	.mode		= MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
	.mode		= MUSB_PERIPHERAL,
#endif
	.config		= &musb_config,
};

static u64 musb_dmamask = ~(u32)0;

static struct platform_device musb_device = {
	.name		= "musb-blackfin",
	.id		= 0,
	.dev = {
		.dma_mask		= &musb_dmamask,
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &musb_plat,
	},
	.num_resources	= ARRAY_SIZE(musb_resources),
	.resource	= musb_resources,
};
#endif

#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
/* Parallel NOR flash layout: bootloader / kernel / rootfs. */
static struct mtd_partition ezbrd_partitions[] = {
	{
		.name = "bootloader(nor)",
		.size = 0x40000,
		.offset = 0,
	}, {
		.name = "linux kernel(nor)",
		.size = 0x1C0000,
		.offset = MTDPART_OFS_APPEND,
	}, {
		.name = "file system(nor)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct physmap_flash_data ezbrd_flash_data = {
	.width = 2,
	.parts = ezbrd_partitions,
	.nr_parts = ARRAY_SIZE(ezbrd_partitions),
};

static struct resource ezbrd_flash_resource = {
	.start = 0x20000000,
	.end = 0x203fffff,
	.flags = IORESOURCE_MEM,
};

static struct platform_device ezbrd_flash_device = {
	.name = "physmap-flash",
	.id = 0,
	.dev = {
		.platform_data = &ezbrd_flash_data,
	},
	.num_resources = 1,
	.resource = &ezbrd_flash_resource,
};
#endif

#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
/* On-board NAND layout: bootloader / kernel / rootfs. */
static struct mtd_partition partition_info[] = {
	{
		.name = "bootloader(nand)",
		.offset = 0,
		.size = 0x40000,
	}, {
		.name = "linux kernel(nand)",
		.offset = MTDPART_OFS_APPEND,
		.size = 4 * 1024 * 1024,
	}, {
		.name = "file system(nand)",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
	},
};

static struct bf5xx_nand_platform bf5xx_nand_platform = {
	.data_width = NFC_NWIDTH_8,
	.partitions = partition_info,
	.nr_partitions = ARRAY_SIZE(partition_info),
	.rd_dly = 3,
	.wr_dly = 3,
};

static struct resource bf5xx_nand_resources[] = {
	{
		.start = NFC_CTL,
		.end = NFC_DATA_RD + 2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = CH_NFC,
		.end = CH_NFC,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bf5xx_nand_device = {
	.name = "bf5xx-nand",
	.id = 0,
	.num_resources = ARRAY_SIZE(bf5xx_nand_resources),
	.resource = bf5xx_nand_resources,
	.dev = {
		.platform_data = &bf5xx_nand_platform,
	},
};
#endif

#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
static struct platform_device rtc_device = {
	.name = "rtc-bfin",
	.id = -1,
};
#endif

#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
#include <linux/bfin_mac.h>
/* On-chip Ethernet MAC wired to the PHY over RMII. */
static const unsigned short bfin_mac_peripherals[] = P_RMII0;

static struct bfin_phydev_platform_data bfin_phydev_data[] = {
	{
		.addr = 1,
		.irq = IRQ_MAC_PHYINT,
	},
};

static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
	.phydev_number = 1,
	.phydev_data = bfin_phydev_data,
	.phy_mode = PHY_INTERFACE_MODE_RMII,
	.mac_peripherals = bfin_mac_peripherals,
};

static struct platform_device bfin_mii_bus = {
	.name = "bfin_mii_bus",
	.dev = {
		.platform_data = &bfin_mii_bus_data,
	}
};

static struct platform_device bfin_mac_device = {
	.name = "bfin_mac",
	.dev = {
		.platform_data = &bfin_mii_bus,
	}
};
#endif

#if defined(CONFIG_MTD_M25P80) \
	|| defined(CONFIG_MTD_M25P80_MODULE)
/* Serial (SPI) flash layout: bootloader (read-only) + kernel. */
static struct mtd_partition bfin_spi_flash_partitions[] = {
	{
		.name = "bootloader(spi)",
		.size = 0x00040000,
		.offset = 0,
		.mask_flags = MTD_CAP_ROM
	}, {
		.name = "linux kernel(spi)",
		.size = MTDPART_SIZ_FULL,
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct flash_platform_data bfin_spi_flash_data = {
	.name = "m25p80",
	.parts = bfin_spi_flash_partitions,
	.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
	.type = "sst25wf040",
};

/* SPI flash chip (sst25wf040) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
	.enable_dma = 0, /* use dma transfer with this chip*/
};
#endif

#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
	.enable_dma = 0,
};
#endif

#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
	.model			= 7877,
	.vref_delay_usecs	= 50,	/* internal, no capacitor */
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	.pressure_max		= 1000,
	.pressure_min		= 0,
	.stopacq_polarity	= 1,
	.first_conversion_delay	= 3,
	.acquisition_time	= 1,
	.averaging		= 1,
	.pen_down_acc_interval	= 1,
};
#endif

#if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE)
#include <linux/spi/ad7879.h>
static const struct ad7879_platform_data bfin_ad7879_ts_info = {
	.model			= 7879,	/* Model = AD7879 */
	.x_plate_ohms		= 620,	/* 620 Ohm from the touch datasheet */
	.pressure_max		= 10000,
	.pressure_min		= 0,
	.first_conversion_delay	= 3,	/* wait 512us before do a first conversion */
	.acquisition_time	= 1,	/* 4us acquisition time per sample */
	.median			= 2,	/* do 8 measurements */
	.averaging		= 1,	/* take the average of 4 middle samples */
	.pen_down_acc_interval	= 255,	/* 9.4 ms */
	.gpio_export		= 1,	/* Export GPIO to gpiolib */
	.gpio_base		= -1,	/* Dynamic allocation */
};
#endif

/* All SPI slaves on bus 0, gated by their respective driver configs. */
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
	|| defined(CONFIG_MTD_M25P80_MODULE)
	{
		/* the modalias must be the same as spi device driver name */
		.modalias = "m25p80", /* Name of spi_driver for this device */
		.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0, /* Framework bus number */
		.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
		.platform_data = &bfin_spi_flash_data,
		.controller_data = &spi_flash_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
	{
		.modalias = "mmc_spi",
		.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,
		.controller_data = &mmc_spi_chip_info,
		.mode = SPI_MODE_3,
	},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
	{
		.modalias = "ad7877",
		.platform_data = &bfin_ad7877_ts_info,
		.irq = IRQ_PF8,
		.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 2,
	},
#endif
#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE)
	{
		.modalias = "ad7879",
		.platform_data = &bfin_ad7879_ts_info,
		.irq = IRQ_PG0,
		.max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,
		.mode = SPI_CPHA | SPI_CPOL,
	},
#endif
#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \
	 && defined(CONFIG_SND_SOC_WM8731_SPI)
	{
		.modalias = "wm8731",
		.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 5,
		.mode = SPI_MODE_0,
	},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
	{
		.modalias = "spidev",
		.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
	},
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
	{
		.modalias = "bfin-lq035q1-spi",
		.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
		.bus_num = 0,
		.chip_select = 1,
		.mode = SPI_CPHA | SPI_CPOL,
	},
#endif
};

#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
	.num_chipselect = 8,
	.enable_dma = 1, /* master has the ability to do dma transfer */
	.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};

/* SPI (0) */
static struct
resource bfin_spi0_resource[] = {
	[0] = {
		.start = SPI0_REGBASE,
		.end = SPI0_REGBASE + 0xFF,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = CH_SPI,
		.end = CH_SPI,
		.flags = IORESOURCE_DMA,
	},
	[2] = {
		.start = IRQ_SPI,
		.end = IRQ_SPI,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_spi0_device = {
	.name = "bfin-spi",
	.id = 0, /* Bus number */
	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
	.resource = bfin_spi0_resource,
	.dev = {
		.platform_data = &bfin_spi0_info, /* Passed to driver */
	},
};
#endif /* spi master and devices */

#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
/* UART0: register window, TX/RX/error IRQs, TX/RX DMA channels. */
static struct resource bfin_uart0_resources[] = {
	{
		.start = UART0_THR,
		.end = UART0_GCTL+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_TX,
		.end = IRQ_UART0_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART0_ERROR,
		.end = IRQ_UART0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_TX,
		.end = CH_UART0_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX,
		.flags = IORESOURCE_DMA,
	},
};

static unsigned short bfin_uart0_peripherals[] = {
	P_UART0_TX, P_UART0_RX, 0
};

static struct platform_device bfin_uart0_device = {
	.name = "bfin-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
	.resource = bfin_uart0_resources,
	.dev = {
		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
/* UART1: as UART0, plus optional CTS/RTS GPIOs when flow control is on. */
static struct resource bfin_uart1_resources[] = {
	{
		.start = UART1_THR,
		.end = UART1_GCTL+2,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART1_TX,
		.end = IRQ_UART1_TX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART1_RX,
		.end = IRQ_UART1_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_UART1_ERROR,
		.end = IRQ_UART1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART1_TX,
		.end = CH_UART1_TX,
		.flags = IORESOURCE_DMA,
	},
	{
		.start = CH_UART1_RX,
		.end = CH_UART1_RX,
		.flags = IORESOURCE_DMA,
	},
#ifdef CONFIG_BFIN_UART1_CTSRTS
	{	/* CTS pin */
		.start = GPIO_PG0,
		.end = GPIO_PG0,
		.flags = IORESOURCE_IO,
	},
	{	/* RTS pin */
		.start = GPIO_PF10,
		.end = GPIO_PF10,
		.flags = IORESOURCE_IO,
	},
#endif
};

static unsigned short bfin_uart1_peripherals[] = {
	P_UART1_TX, P_UART1_RX, 0
};

static struct platform_device bfin_uart1_device = {
	.name = "bfin-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_uart1_resources),
	.resource = bfin_uart1_resources,
	.dev = {
		.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
	},
};
#endif
#endif

#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
/* IrDA (SIR) over UART0. */
static struct resource bfin_sir0_resources[] = {
	{
		.start = 0xFFC00400,
		.end = 0xFFC004FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART0_RX,
		.end = IRQ_UART0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART0_RX,
		.end = CH_UART0_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir0_device = {
	.name = "bfin_sir",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sir0_resources),
	.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
/* IrDA (SIR) over UART1. */
static struct resource bfin_sir1_resources[] = {
	{
		.start = 0xFFC02000,
		.end = 0xFFC020FF,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_UART1_RX,
		.end = IRQ_UART1_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART1_RX,
		.end = CH_UART1_RX+1,
		.flags = IORESOURCE_DMA,
	},
};

static struct platform_device bfin_sir1_device = {
	.name = "bfin_sir",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sir1_resources),
	.resource = bfin_sir1_resources,
};
#endif
#endif

#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
/* Two-wire (I2C) interface pins and resources. */
static const u16 bfin_twi0_pins[] = {P_TWI0_SCL, P_TWI0_SDA, 0};

static struct resource bfin_twi0_resource[] = {
	[0] = {
		.start = TWI0_REGBASE,
		.end = TWI0_REGBASE,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_TWI,
		.end = IRQ_TWI,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device i2c_bfin_twi_device = {
	.name = "i2c-bfin-twi",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
	.resource = bfin_twi0_resource,
	.dev = {
		.platform_data = &bfin_twi0_pins,
	},
};
#endif

/* I2C slaves registered at boot (LCD and keypad expanders). */
static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE)
	{
		I2C_BOARD_INFO("pcf8574_lcd", 0x22),
	},
#endif
#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE)
	{
		I2C_BOARD_INFO("pcf8574_keypad", 0x27),
		.irq = IRQ_PF8,
	},
#endif
};

#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
/* SPORT0 used as an extra UART. */
static struct resource bfin_sport0_uart_resources[] = {
	{
		.start = SPORT0_TCR1,
		.end = SPORT0_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT0_RX,
		.end = IRQ_SPORT0_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT0_ERROR,
		.end = IRQ_SPORT0_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport0_peripherals[] = {
	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK,
	P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
};

static struct platform_device bfin_sport0_uart_device = {
	.name = "bfin-sport-uart",
	.id = 0,
	.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
	.resource = bfin_sport0_uart_resources,
	.dev = {
		.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
	},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
/* SPORT1 used as an extra UART. */
static struct resource bfin_sport1_uart_resources[] = {
	{
		.start = SPORT1_TCR1,
		.end = SPORT1_MRCS3+4,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = IRQ_SPORT1_RX,
		.end = IRQ_SPORT1_RX+1,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = IRQ_SPORT1_ERROR,
		.end = IRQ_SPORT1_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static unsigned short bfin_sport1_peripherals[] = {
	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK,
	P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
};

static struct platform_device bfin_sport1_uart_device = {
	.name = "bfin-sport-uart",
	.id = 1,
	.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
	.resource = bfin_sport1_uart_resources,
	.dev = {
		.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
	},
};
#endif
#endif

#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
#include <linux/input.h>
#include <linux/gpio_keys.h>

/* Two push buttons exposed through the gpio-keys driver. */
static struct gpio_keys_button bfin_gpio_keys_table[] = {
	{BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"},
	{BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"},
};

static struct gpio_keys_platform_data bfin_gpio_keys_data = {
	.buttons = bfin_gpio_keys_table,
	.nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
};

static struct platform_device bfin_device_gpiokeys = {
	.name = "gpio-keys",
	.dev = {
		.platform_data = &bfin_gpio_keys_data,
	},
};
#endif

/* Voltage-level / core-clock pairs from the datasheet, used by DPMC. */
static const unsigned int cclk_vlev_datasheet[] =
{
	VRPAIR(VLEV_100, 400000000),
	VRPAIR(VLEV_105, 426000000),
	VRPAIR(VLEV_110, 500000000),
	VRPAIR(VLEV_115, 533000000),
	VRPAIR(VLEV_120, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,
};

static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};

#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
#include <asm/bfin-lq035q1.h>

/* Sharp LQ035Q1 LCD over the PPI in RGB565 mode. */
static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = {
	.mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB,
	.ppi_mode = USE_RGB565_16_BIT_PPI,
	.use_bl = 1,
	.gpio_bl = GPIO_PG12,
};

static struct resource bfin_lq035q1_resources[] = {
	{
		.start = IRQ_PPI_ERROR,
		.end = IRQ_PPI_ERROR,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_lq035q1_device = {
	.name		= "bfin-lq035q1",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(bfin_lq035q1_resources),
	.resource	= bfin_lq035q1_resources,
	.dev		= {
		.platform_data = &bfin_lq035q1_data,
	},
};
#endif

/* Every platform device registered by this board, gated by config. */
static struct platform_device *stamp_devices[] __initdata = {
	&bfin_dpmc,
#if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE)
	&bf5xx_nand_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
	&rtc_device,
#endif
#if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE)
	&musb_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
	&bfin_mii_bus,
	&bfin_mac_device,
#endif
#if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE)
	&bfin_spi0_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE)
	&bfin_lq035q1_device,
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
	&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
	&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
	&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
	&bfin_device_gpiokeys,
#endif
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
	&ezbrd_flash_device,
#endif
};

/* Board init: register I2C slaves, platform devices, and SPI slaves. */
static int __init ezbrd_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	i2c_register_board_info(0, bfin_i2c_board_info,
				ARRAY_SIZE(bfin_i2c_board_info));
	platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices));
	spi_register_board_info(bfin_spi_board_info,
				ARRAY_SIZE(bfin_spi_board_info));
	return 0;
}

arch_initcall(ezbrd_init);

/* Devices needed before the normal initcalls (early console). */
static struct platform_device *ezbrd_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
};

void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(ezbrd_early_devices,
		ARRAY_SIZE(ezbrd_early_devices));
}

void native_machine_restart(char *cmd)
{
	/* workaround reboot hang when booting from SPI */
	if ((bfin_read_SYSCR() & 0x7) == 0x3)
		bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS);
}

/*
 * Read the board MAC out of on-chip OTP memory via the on-chip ROM's
 * otp_read entry point; bytes are stored in reverse order.  Leaves
 * "addr" untouched (and still returns 0) if the OTP read reports an
 * error in bit 0 of its return value.
 */
int bfin_get_ether_addr(char *addr)
{
	/* the MAC is stored in OTP memory page 0xDF */
	u32 ret;
	u64 otp_mac;
	/* Fixed ROM address of the OTP read routine on this part. */
	u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) =
		(void *)0xEF00001A;

	ret = otp_read(0xDF, 0x00, &otp_mac);
	if (!(ret & 0x1)) {
		char *otp_mac_p = (char *)&otp_mac;
		for (ret = 0; ret < 6; ++ret)
			addr[ret] = otp_mac_p[5 - ret];
	}
	return 0;
}
EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
showp1984/bricked-pyramid-3.0
arch/arm/mach-pxa/am200epd.c
2987
9659
/* * am200epd.c -- Platform device for AM200 EPD kit * * Copyright (C) 2008, Jaya Kumar * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven. * * This work was made possible by help and equipment support from E-Ink * Corporation. http://support.eink.com/community * * This driver is written to be used with the Metronome display controller. * on the AM200 EPD prototype kit/development kit with an E-Ink 800x600 * Vizplex EPD on a Gumstix board using the Lyre interface board. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/gpio.h> #include <mach/pxa25x.h> #include <mach/gumstix.h> #include <mach/pxafb.h> #include "generic.h" #include <video/metronomefb.h> static unsigned int panel_type = 6; static struct platform_device *am200_device; static struct metronome_board am200_board; static struct pxafb_mode_info am200_fb_mode_9inch7 = { .pixclock = 40000, .xres = 1200, .yres = 842, .bpp = 16, .hsync_len = 2, .left_margin = 2, .right_margin = 2, .vsync_len = 1, .upper_margin = 2, .lower_margin = 25, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }; static struct pxafb_mode_info am200_fb_mode_8inch = { .pixclock = 40000, .xres = 1088, .yres = 791, .bpp = 16, .hsync_len = 28, .left_margin = 8, .right_margin = 30, .vsync_len = 8, .upper_margin = 10, .lower_margin = 8, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }; static struct pxafb_mode_info am200_fb_mode_6inch = { .pixclock = 40189, .xres = 832, .yres = 622, .bpp = 16, .hsync_len = 28, .left_margin = 34, .right_margin = 34, .vsync_len = 25, .upper_margin = 0, .lower_margin = 2, .sync = 
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }; static struct pxafb_mach_info am200_fb_info = { .modes = &am200_fb_mode_6inch, .num_modes = 1, .lcd_conn = LCD_TYPE_COLOR_TFT | LCD_PCLK_EDGE_FALL | LCD_AC_BIAS_FREQ(24), }; /* register offsets for gpio control */ #define LED_GPIO_PIN 51 #define STDBY_GPIO_PIN 48 #define RST_GPIO_PIN 49 #define RDY_GPIO_PIN 32 #define ERR_GPIO_PIN 17 #define PCBPWR_GPIO_PIN 16 static int gpios[] = { LED_GPIO_PIN , STDBY_GPIO_PIN , RST_GPIO_PIN, RDY_GPIO_PIN, ERR_GPIO_PIN, PCBPWR_GPIO_PIN }; static char *gpio_names[] = { "LED" , "STDBY" , "RST", "RDY", "ERR", "PCBPWR" }; static int am200_init_gpio_regs(struct metronomefb_par *par) { int i; int err; for (i = 0; i < ARRAY_SIZE(gpios); i++) { err = gpio_request(gpios[i], gpio_names[i]); if (err) { dev_err(&am200_device->dev, "failed requesting " "gpio %s, err=%d\n", gpio_names[i], err); goto err_req_gpio; } } gpio_direction_output(LED_GPIO_PIN, 0); gpio_direction_output(STDBY_GPIO_PIN, 0); gpio_direction_output(RST_GPIO_PIN, 0); gpio_direction_input(RDY_GPIO_PIN); gpio_direction_input(ERR_GPIO_PIN); gpio_direction_output(PCBPWR_GPIO_PIN, 0); return 0; err_req_gpio: while (--i >= 0) gpio_free(gpios[i]); return err; } static void am200_cleanup(struct metronomefb_par *par) { int i; free_irq(IRQ_GPIO(RDY_GPIO_PIN), par); for (i = 0; i < ARRAY_SIZE(gpios); i++) gpio_free(gpios[i]); } static int am200_share_video_mem(struct fb_info *info) { /* rough check if this is our desired fb and not something else */ if ((info->var.xres != am200_fb_info.modes->xres) || (info->var.yres != am200_fb_info.modes->yres)) return 0; /* we've now been notified that we have our new fb */ am200_board.metromem = info->screen_base; am200_board.host_fbinfo = info; /* try to refcount host drv since we are the consumer after this */ if (!try_module_get(info->fbops->owner)) return -ENODEV; return 0; } static int am200_unshare_video_mem(struct fb_info *info) { dev_dbg(&am200_device->dev, "ENTER %s\n", __func__); if (info 
!= am200_board.host_fbinfo) return 0; module_put(am200_board.host_fbinfo->fbops->owner); return 0; } static int am200_fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) { struct fb_event *evdata = data; struct fb_info *info = evdata->info; dev_dbg(&am200_device->dev, "ENTER %s\n", __func__); if (event == FB_EVENT_FB_REGISTERED) return am200_share_video_mem(info); else if (event == FB_EVENT_FB_UNREGISTERED) return am200_unshare_video_mem(info); return 0; } static struct notifier_block am200_fb_notif = { .notifier_call = am200_fb_notifier_callback, }; /* this gets called as part of our init. these steps must be done now so * that we can use pxa_set_fb_info */ static void __init am200_presetup_fb(void) { int fw; int fh; int padding_size; int totalsize; switch (panel_type) { case 6: am200_fb_info.modes = &am200_fb_mode_6inch; break; case 8: am200_fb_info.modes = &am200_fb_mode_8inch; break; case 97: am200_fb_info.modes = &am200_fb_mode_9inch7; break; default: dev_err(&am200_device->dev, "invalid panel_type selection," " setting to 6\n"); am200_fb_info.modes = &am200_fb_mode_6inch; break; } /* the frame buffer is divided as follows: command | CRC | padding 16kb waveform data | CRC | padding image data | CRC */ fw = am200_fb_info.modes->xres; fh = am200_fb_info.modes->yres; /* waveform must be 16k + 2 for checksum */ am200_board.wfm_size = roundup(16*1024 + 2, fw); padding_size = PAGE_SIZE + (4 * fw); /* total is 1 cmd , 1 wfm, padding and image */ totalsize = fw + am200_board.wfm_size + padding_size + (fw*fh); /* save this off because we're manipulating fw after this and * we'll need it when we're ready to setup the framebuffer */ am200_board.fw = fw; am200_board.fh = fh; /* the reason we do this adjustment is because we want to acquire * more framebuffer memory without imposing custom awareness on the * underlying pxafb driver */ am200_fb_info.modes->yres = DIV_ROUND_UP(totalsize, fw); /* we divide since we told the LCD controller we're 
16bpp */ am200_fb_info.modes->xres /= 2; pxa_set_fb_info(NULL, &am200_fb_info); } /* this gets called by metronomefb as part of its init, in our case, we * have already completed initial framebuffer init in presetup_fb so we * can just setup the fb access pointers */ static int am200_setup_fb(struct metronomefb_par *par) { int fw; int fh; fw = am200_board.fw; fh = am200_board.fh; /* metromem was set up by the notifier in share_video_mem so now * we can use its value to calculate the other entries */ par->metromem_cmd = (struct metromem_cmd *) am200_board.metromem; par->metromem_wfm = am200_board.metromem + fw; par->metromem_img = par->metromem_wfm + am200_board.wfm_size; par->metromem_img_csum = (u16 *) (par->metromem_img + (fw * fh)); par->metromem_dma = am200_board.host_fbinfo->fix.smem_start; return 0; } static int am200_get_panel_type(void) { return panel_type; } static irqreturn_t am200_handle_irq(int irq, void *dev_id) { struct metronomefb_par *par = dev_id; wake_up_interruptible(&par->waitq); return IRQ_HANDLED; } static int am200_setup_irq(struct fb_info *info) { int ret; ret = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am200_handle_irq, IRQF_DISABLED|IRQF_TRIGGER_FALLING, "AM200", info->par); if (ret) dev_err(&am200_device->dev, "request_irq failed: %d\n", ret); return ret; } static void am200_set_rst(struct metronomefb_par *par, int state) { gpio_set_value(RST_GPIO_PIN, state); } static void am200_set_stdby(struct metronomefb_par *par, int state) { gpio_set_value(STDBY_GPIO_PIN, state); } static int am200_wait_event(struct metronomefb_par *par) { return wait_event_timeout(par->waitq, gpio_get_value(RDY_GPIO_PIN), HZ); } static int am200_wait_event_intr(struct metronomefb_par *par) { return wait_event_interruptible_timeout(par->waitq, gpio_get_value(RDY_GPIO_PIN), HZ); } static struct metronome_board am200_board = { .owner = THIS_MODULE, .setup_irq = am200_setup_irq, .setup_io = am200_init_gpio_regs, .setup_fb = am200_setup_fb, .set_rst = am200_set_rst, 
/*
 * Board init for the AM200 EPD kit: register for framebuffer creation
 * events, configure the pin mux, and bind a "metronomefb" platform
 * device carrying a copy of am200_board.
 *
 * Fixes over the previous version:
 *  - the fb notifier client was leaked when platform_device_alloc()
 *    failed (only the platform_device_add() failure path unregistered
 *    it);
 *  - the return value of platform_device_add_data() (which can fail
 *    with -ENOMEM) was ignored.
 *
 * Returns 0 on success or a negative errno, with all partially
 * acquired resources released.
 */
int __init am200_init(void)
{
	int ret;

	/* before anything else, we request notification for any fb
	 * creation events */
	fb_register_client(&am200_fb_notif);

	pxa2xx_mfp_config(ARRAY_AND_SIZE(am200_pin_config));

	/* request our platform independent driver */
	request_module("metronomefb");

	am200_device = platform_device_alloc("metronomefb", -1);
	if (!am200_device) {
		ret = -ENOMEM;
		goto err_unregister;
	}

	/* the am200_board that will be seen by metronomefb is a copy */
	ret = platform_device_add_data(am200_device, &am200_board,
				       sizeof(am200_board));
	if (ret)
		goto err_put;

	/* this _add binds metronomefb to am200. metronomefb refcounts am200 */
	ret = platform_device_add(am200_device);
	if (ret)
		goto err_put;

	am200_presetup_fb();

	return 0;

err_put:
	platform_device_put(am200_device);
err_unregister:
	fb_unregister_client(&am200_fb_notif);
	return ret;
}
gpl-2.0
gunine/boeffla-kernel-jb-lte
arch/sh/mm/pmb.c
2987
19014
/* * arch/sh/mm/pmb.c * * Privileged Space Mapping Buffer (PMB) Support. * * Copyright (C) 2005 - 2011 Paul Mundt * Copyright (C) 2010 Matt Fleming * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/syscore_ops.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/err.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <asm/cacheflush.h> #include <asm/sizes.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/mmu_context.h> struct pmb_entry; struct pmb_entry { unsigned long vpn; unsigned long ppn; unsigned long flags; unsigned long size; raw_spinlock_t lock; /* * 0 .. NR_PMB_ENTRIES for specific entry selection, or * PMB_NO_ENTRY to search for a free one */ int entry; /* Adjacent entry link for contiguous multi-entry mappings */ struct pmb_entry *link; }; static struct { unsigned long size; int flag; } pmb_sizes[] = { { .size = SZ_512M, .flag = PMB_SZ_512M, }, { .size = SZ_128M, .flag = PMB_SZ_128M, }, { .size = SZ_64M, .flag = PMB_SZ_64M, }, { .size = SZ_16M, .flag = PMB_SZ_16M, }, }; static void pmb_unmap_entry(struct pmb_entry *, int depth); static DEFINE_RWLOCK(pmb_rwlock); static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); static unsigned int pmb_iomapping_enabled; static __always_inline unsigned long mk_pmb_entry(unsigned int entry) { return (entry & PMB_E_MASK) << PMB_E_SHIFT; } static __always_inline unsigned long mk_pmb_addr(unsigned int entry) { return mk_pmb_entry(entry) | PMB_ADDR; } static __always_inline unsigned long mk_pmb_data(unsigned int entry) { return mk_pmb_entry(entry) | 
PMB_DATA; } static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) { return ppn >= __pa(memory_start) && ppn < __pa(memory_end); } /* * Ensure that the PMB entries match our cache configuration. * * When we are in 32-bit address extended mode, CCR.CB becomes * invalid, so care must be taken to manually adjust cacheable * translations. */ static __always_inline unsigned long pmb_cache_flags(void) { unsigned long flags = 0; #if defined(CONFIG_CACHE_OFF) flags |= PMB_WT | PMB_UB; #elif defined(CONFIG_CACHE_WRITETHROUGH) flags |= PMB_C | PMB_WT | PMB_UB; #elif defined(CONFIG_CACHE_WRITEBACK) flags |= PMB_C; #endif return flags; } /* * Convert typical pgprot value to the PMB equivalent */ static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot) { unsigned long pmb_flags = 0; u64 flags = pgprot_val(prot); if (flags & _PAGE_CACHABLE) pmb_flags |= PMB_C; if (flags & _PAGE_WT) pmb_flags |= PMB_WT | PMB_UB; return pmb_flags; } static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) { return (b->vpn == (a->vpn + a->size)) && (b->ppn == (a->ppn + a->size)) && (b->flags == a->flags); } static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys, unsigned long size) { int i; read_lock(&pmb_rwlock); for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { struct pmb_entry *pmbe, *iter; unsigned long span; if (!test_bit(i, pmb_map)) continue; pmbe = &pmb_entry_list[i]; /* * See if VPN and PPN are bounded by an existing mapping. */ if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) continue; if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size))) continue; /* * Now see if we're in range of a simple mapping. */ if (size <= pmbe->size) { read_unlock(&pmb_rwlock); return true; } span = pmbe->size; /* * Finally for sizes that involve compound mappings, walk * the chain. */ for (iter = pmbe->link; iter; iter = iter->link) span += iter->size; /* * Nothing else to do if the range requirements are met. 
*/ if (size <= span) { read_unlock(&pmb_rwlock); return true; } } read_unlock(&pmb_rwlock); return false; } static bool pmb_size_valid(unsigned long size) { int i; for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) if (pmb_sizes[i].size == size) return true; return false; } static inline bool pmb_addr_valid(unsigned long addr, unsigned long size) { return (addr >= P1SEG && (addr + size - 1) < P3SEG); } static inline bool pmb_prot_valid(pgprot_t prot) { return (pgprot_val(prot) & _PAGE_USER) == 0; } static int pmb_size_to_flags(unsigned long size) { int i; for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) if (pmb_sizes[i].size == size) return pmb_sizes[i].flag; return 0; } static int pmb_alloc_entry(void) { int pos; pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES); if (pos >= 0 && pos < NR_PMB_ENTRIES) __set_bit(pos, pmb_map); else pos = -ENOSPC; return pos; } static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, unsigned long flags, int entry) { struct pmb_entry *pmbe; unsigned long irqflags; void *ret = NULL; int pos; write_lock_irqsave(&pmb_rwlock, irqflags); if (entry == PMB_NO_ENTRY) { pos = pmb_alloc_entry(); if (unlikely(pos < 0)) { ret = ERR_PTR(pos); goto out; } } else { if (__test_and_set_bit(entry, pmb_map)) { ret = ERR_PTR(-ENOSPC); goto out; } pos = entry; } write_unlock_irqrestore(&pmb_rwlock, irqflags); pmbe = &pmb_entry_list[pos]; memset(pmbe, 0, sizeof(struct pmb_entry)); raw_spin_lock_init(&pmbe->lock); pmbe->vpn = vpn; pmbe->ppn = ppn; pmbe->flags = flags; pmbe->entry = pos; return pmbe; out: write_unlock_irqrestore(&pmb_rwlock, irqflags); return ret; } static void pmb_free(struct pmb_entry *pmbe) { __clear_bit(pmbe->entry, pmb_map); pmbe->entry = PMB_NO_ENTRY; pmbe->link = NULL; } /* * Must be run uncached. 
*/ static void __set_pmb_entry(struct pmb_entry *pmbe) { unsigned long addr, data; addr = mk_pmb_addr(pmbe->entry); data = mk_pmb_data(pmbe->entry); jump_to_uncached(); /* Set V-bit */ __raw_writel(pmbe->vpn | PMB_V, addr); __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data); back_to_cached(); } static void __clear_pmb_entry(struct pmb_entry *pmbe) { unsigned long addr, data; unsigned long addr_val, data_val; addr = mk_pmb_addr(pmbe->entry); data = mk_pmb_data(pmbe->entry); addr_val = __raw_readl(addr); data_val = __raw_readl(data); /* Clear V-bit */ writel_uncached(addr_val & ~PMB_V, addr); writel_uncached(data_val & ~PMB_V, data); } #ifdef CONFIG_PM static void set_pmb_entry(struct pmb_entry *pmbe) { unsigned long flags; raw_spin_lock_irqsave(&pmbe->lock, flags); __set_pmb_entry(pmbe); raw_spin_unlock_irqrestore(&pmbe->lock, flags); } #endif /* CONFIG_PM */ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, unsigned long size, pgprot_t prot) { struct pmb_entry *pmbp, *pmbe; unsigned long orig_addr, orig_size; unsigned long flags, pmb_flags; int i, mapped; if (size < SZ_16M) return -EINVAL; if (!pmb_addr_valid(vaddr, size)) return -EFAULT; if (pmb_mapping_exists(vaddr, phys, size)) return 0; orig_addr = vaddr; orig_size = size; flush_tlb_kernel_range(vaddr, vaddr + size); pmb_flags = pgprot_to_pmb_flags(prot); pmbp = NULL; do { for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) { if (size < pmb_sizes[i].size) continue; pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, PMB_NO_ENTRY); if (IS_ERR(pmbe)) { pmb_unmap_entry(pmbp, mapped); return PTR_ERR(pmbe); } raw_spin_lock_irqsave(&pmbe->lock, flags); pmbe->size = pmb_sizes[i].size; __set_pmb_entry(pmbe); phys += pmbe->size; vaddr += pmbe->size; size -= pmbe->size; /* * Link adjacent entries that span multiple PMB * entries for easier tear-down. 
*/ if (likely(pmbp)) { raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING); pmbp->link = pmbe; raw_spin_unlock(&pmbp->lock); } pmbp = pmbe; /* * Instead of trying smaller sizes on every * iteration (even if we succeed in allocating * space), try using pmb_sizes[i].size again. */ i--; mapped++; raw_spin_unlock_irqrestore(&pmbe->lock, flags); } } while (size >= SZ_16M); flush_cache_vmap(orig_addr, orig_addr + orig_size); return 0; } void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, pgprot_t prot, void *caller) { unsigned long vaddr; phys_addr_t offset, last_addr; phys_addr_t align_mask; unsigned long aligned; struct vm_struct *area; int i, ret; if (!pmb_iomapping_enabled) return NULL; /* * Small mappings need to go through the TLB. */ if (size < SZ_16M) return ERR_PTR(-EINVAL); if (!pmb_prot_valid(prot)) return ERR_PTR(-EINVAL); for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) if (size >= pmb_sizes[i].size) break; last_addr = phys + size; align_mask = ~(pmb_sizes[i].size - 1); offset = phys & ~align_mask; phys &= align_mask; aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys; /* * XXX: This should really start from uncached_end, but this * causes the MMU to reset, so for now we restrict it to the * 0xb000...0xc000 range. 
*/ area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000, P3SEG, caller); if (!area) return NULL; area->phys_addr = phys; vaddr = (unsigned long)area->addr; ret = pmb_bolt_mapping(vaddr, phys, size, prot); if (unlikely(ret != 0)) return ERR_PTR(ret); return (void __iomem *)(offset + (char *)vaddr); } int pmb_unmap(void __iomem *addr) { struct pmb_entry *pmbe = NULL; unsigned long vaddr = (unsigned long __force)addr; int i, found = 0; read_lock(&pmb_rwlock); for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; if (pmbe->vpn == vaddr) { found = 1; break; } } } read_unlock(&pmb_rwlock); if (found) { pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); return 0; } return -EINVAL; } static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) { do { struct pmb_entry *pmblink = pmbe; /* * We may be called before this pmb_entry has been * entered into the PMB table via set_pmb_entry(), but * that's OK because we've allocated a unique slot for * this entry in pmb_alloc() (even if we haven't filled * it yet). * * Therefore, calling __clear_pmb_entry() is safe as no * other mapping can be using that slot. */ __clear_pmb_entry(pmbe); flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size); pmbe = pmblink->link; pmb_free(pmblink); } while (pmbe && --depth); } static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth) { unsigned long flags; if (unlikely(!pmbe)) return; write_lock_irqsave(&pmb_rwlock, flags); __pmb_unmap_entry(pmbe, depth); write_unlock_irqrestore(&pmb_rwlock, flags); } static void __init pmb_notify(void) { int i; pr_info("PMB: boot mappings:\n"); read_lock(&pmb_rwlock); for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { struct pmb_entry *pmbe; if (!test_bit(i, pmb_map)) continue; pmbe = &pmb_entry_list[i]; pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n", pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT, pmbe->size >> 20, (pmbe->flags & PMB_C) ? 
"" : "un"); } read_unlock(&pmb_rwlock); } /* * Sync our software copy of the PMB mappings with those in hardware. The * mappings in the hardware PMB were either set up by the bootloader or * very early on by the kernel. */ static void __init pmb_synchronize(void) { struct pmb_entry *pmbp = NULL; int i, j; /* * Run through the initial boot mappings, log the established * ones, and blow away anything that falls outside of the valid * PPN range. Specifically, we only care about existing mappings * that impact the cached/uncached sections. * * Note that touching these can be a bit of a minefield; the boot * loader can establish multi-page mappings with the same caching * attributes, so we need to ensure that we aren't modifying a * mapping that we're presently executing from, or may execute * from in the case of straddling page boundaries. * * In the future we will have to tidy up after the boot loader by * jumping between the cached and uncached mappings and tearing * down alternating mappings while executing from the other. */ for (i = 0; i < NR_PMB_ENTRIES; i++) { unsigned long addr, data; unsigned long addr_val, data_val; unsigned long ppn, vpn, flags; unsigned long irqflags; unsigned int size; struct pmb_entry *pmbe; addr = mk_pmb_addr(i); data = mk_pmb_data(i); addr_val = __raw_readl(addr); data_val = __raw_readl(data); /* * Skip over any bogus entries */ if (!(data_val & PMB_V) || !(addr_val & PMB_V)) continue; ppn = data_val & PMB_PFN_MASK; vpn = addr_val & PMB_PFN_MASK; /* * Only preserve in-range mappings. */ if (!pmb_ppn_in_range(ppn)) { /* * Invalidate anything out of bounds. 
*/ writel_uncached(addr_val & ~PMB_V, addr); writel_uncached(data_val & ~PMB_V, data); continue; } /* * Update the caching attributes if necessary */ if (data_val & PMB_C) { data_val &= ~PMB_CACHE_MASK; data_val |= pmb_cache_flags(); writel_uncached(data_val, data); } size = data_val & PMB_SZ_MASK; flags = size | (data_val & PMB_CACHE_MASK); pmbe = pmb_alloc(vpn, ppn, flags, i); if (IS_ERR(pmbe)) { WARN_ON_ONCE(1); continue; } raw_spin_lock_irqsave(&pmbe->lock, irqflags); for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) if (pmb_sizes[j].flag == size) pmbe->size = pmb_sizes[j].size; if (pmbp) { raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING); /* * Compare the previous entry against the current one to * see if the entries span a contiguous mapping. If so, * setup the entry links accordingly. Compound mappings * are later coalesced. */ if (pmb_can_merge(pmbp, pmbe)) pmbp->link = pmbe; raw_spin_unlock(&pmbp->lock); } pmbp = pmbe; raw_spin_unlock_irqrestore(&pmbe->lock, irqflags); } } static void __init pmb_merge(struct pmb_entry *head) { unsigned long span, newsize; struct pmb_entry *tail; int i = 1, depth = 0; span = newsize = head->size; tail = head->link; while (tail) { span += tail->size; if (pmb_size_valid(span)) { newsize = span; depth = i; } /* This is the end of the line.. */ if (!tail->link) break; tail = tail->link; i++; } /* * The merged page size must be valid. */ if (!depth || !pmb_size_valid(newsize)) return; head->flags &= ~PMB_SZ_MASK; head->flags |= pmb_size_to_flags(newsize); head->size = newsize; __pmb_unmap_entry(head->link, depth); __set_pmb_entry(head); } static void __init pmb_coalesce(void) { unsigned long flags; int i; write_lock_irqsave(&pmb_rwlock, flags); for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { struct pmb_entry *pmbe; if (!test_bit(i, pmb_map)) continue; pmbe = &pmb_entry_list[i]; /* * We're only interested in compound mappings */ if (!pmbe->link) continue; /* * Nothing to do if it already uses the largest possible * page size. 
*/ if (pmbe->size == SZ_512M) continue; pmb_merge(pmbe); } write_unlock_irqrestore(&pmb_rwlock, flags); } #ifdef CONFIG_UNCACHED_MAPPING static void __init pmb_resize(void) { int i; /* * If the uncached mapping was constructed by the kernel, it will * already be a reasonable size. */ if (uncached_size == SZ_16M) return; read_lock(&pmb_rwlock); for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { struct pmb_entry *pmbe; unsigned long flags; if (!test_bit(i, pmb_map)) continue; pmbe = &pmb_entry_list[i]; if (pmbe->vpn != uncached_start) continue; /* * Found it, now resize it. */ raw_spin_lock_irqsave(&pmbe->lock, flags); pmbe->size = SZ_16M; pmbe->flags &= ~PMB_SZ_MASK; pmbe->flags |= pmb_size_to_flags(pmbe->size); uncached_resize(pmbe->size); __set_pmb_entry(pmbe); raw_spin_unlock_irqrestore(&pmbe->lock, flags); } read_unlock(&pmb_rwlock); } #endif static int __init early_pmb(char *p) { if (!p) return 0; if (strstr(p, "iomap")) pmb_iomapping_enabled = 1; return 0; } early_param("pmb", early_pmb); void __init pmb_init(void) { /* Synchronize software state */ pmb_synchronize(); /* Attempt to combine compound mappings */ pmb_coalesce(); #ifdef CONFIG_UNCACHED_MAPPING /* Resize initial mappings, if necessary */ pmb_resize(); #endif /* Log them */ pmb_notify(); writel_uncached(0, PMB_IRMCR); /* Flush out the TLB */ local_flush_tlb_all(); ctrl_barrier(); } bool __in_29bit_mode(void) { return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0; } static int pmb_seq_show(struct seq_file *file, void *iter) { int i; seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n" "CB: Copy-Back, B: Buffered, UB: Unbuffered\n"); seq_printf(file, "ety vpn ppn size flags\n"); for (i = 0; i < NR_PMB_ENTRIES; i++) { unsigned long addr, data; unsigned int size; char *sz_str = NULL; addr = __raw_readl(mk_pmb_addr(i)); data = __raw_readl(mk_pmb_data(i)); size = data & PMB_SZ_MASK; sz_str = (size == PMB_SZ_16M) ? " 16MB": (size == PMB_SZ_64M) ? " 64MB": (size == PMB_SZ_128M) ? 
"128MB": "512MB"; /* 02: V 0x88 0x08 128MB C CB B */ seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n", i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ', (addr >> 24) & 0xff, (data >> 24) & 0xff, sz_str, (data & PMB_C) ? 'C' : ' ', (data & PMB_WT) ? "WT" : "CB", (data & PMB_UB) ? "UB" : " B"); } return 0; } static int pmb_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, pmb_seq_show, NULL); } static const struct file_operations pmb_debugfs_fops = { .owner = THIS_MODULE, .open = pmb_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init pmb_debugfs_init(void) { struct dentry *dentry; dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO, arch_debugfs_dir, NULL, &pmb_debugfs_fops); if (!dentry) return -ENOMEM; return 0; } subsys_initcall(pmb_debugfs_init); #ifdef CONFIG_PM static void pmb_syscore_resume(void) { struct pmb_entry *pmbe; int i; read_lock(&pmb_rwlock); for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { if (test_bit(i, pmb_map)) { pmbe = &pmb_entry_list[i]; set_pmb_entry(pmbe); } } read_unlock(&pmb_rwlock); } static struct syscore_ops pmb_syscore_ops = { .resume = pmb_syscore_resume, }; static int __init pmb_sysdev_init(void) { register_syscore_ops(&pmb_syscore_ops); return 0; } subsys_initcall(pmb_sysdev_init); #endif
gpl-2.0
OESF/Linaro-Android_LinaroSprint2011Q1
drivers/char/ps3flash.c
3499
10994
/* * PS3 FLASH ROM Storage Driver * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <asm/lv1call.h> #include <asm/ps3stor.h> #define DEVICE_NAME "ps3flash" #define FLASH_BLOCK_SIZE (256*1024) struct ps3flash_private { struct mutex mutex; /* Bounce buffer mutex */ u64 chunk_sectors; int tag; /* Start sector of buffer, -1 if invalid */ bool dirty; }; static struct ps3_storage_device *ps3flash_dev; static int ps3flash_read_write_sectors(struct ps3_storage_device *dev, u64 start_sector, int write) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, start_sector, priv->chunk_sectors, write); if (res) { dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, __LINE__, write ? 
"write" : "read", res); return -EIO; } return 0; } static int ps3flash_writeback(struct ps3_storage_device *dev) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); int res; if (!priv->dirty || priv->tag < 0) return 0; res = ps3flash_read_write_sectors(dev, priv->tag, 1); if (res) return res; priv->dirty = false; return 0; } static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector) { struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); int res; if (start_sector == priv->tag) return 0; res = ps3flash_writeback(dev); if (res) return res; priv->tag = -1; res = ps3flash_read_write_sectors(dev, start_sector, 0); if (res) return res; priv->tag = start_sector; return 0; } static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin) { struct ps3_storage_device *dev = ps3flash_dev; loff_t res; mutex_lock(&file->f_mapping->host->i_mutex); switch (origin) { case 1: offset += file->f_pos; break; case 2: offset += dev->regions[dev->region_idx].size*dev->blk_size; break; } if (offset < 0) { res = -EINVAL; goto out; } file->f_pos = offset; res = file->f_pos; out: mutex_unlock(&file->f_mapping->host->i_mutex); return res; } static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf, size_t count, loff_t *pos) { struct ps3_storage_device *dev = ps3flash_dev; struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 size, sector, offset; int res; size_t remaining, n; const void *src; dev_dbg(&dev->sbd.core, "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n", __func__, __LINE__, count, *pos, userbuf, kernelbuf); size = dev->regions[dev->region_idx].size*dev->blk_size; if (*pos >= size || !count) return 0; if (*pos + count > size) { dev_dbg(&dev->sbd.core, "%s:%u Truncating count from %zu to %llu\n", __func__, __LINE__, count, size - *pos); count = size - *pos; } sector = *pos / dev->bounce_size * priv->chunk_sectors; offset = *pos % dev->bounce_size; remaining = count; do { n = 
min_t(u64, remaining, dev->bounce_size - offset); src = dev->bounce_buf + offset; mutex_lock(&priv->mutex); res = ps3flash_fetch(dev, sector); if (res) goto fail; dev_dbg(&dev->sbd.core, "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n", __func__, __LINE__, n, src, userbuf, kernelbuf); if (userbuf) { if (copy_to_user(userbuf, src, n)) { res = -EFAULT; goto fail; } userbuf += n; } if (kernelbuf) { memcpy(kernelbuf, src, n); kernelbuf += n; } mutex_unlock(&priv->mutex); *pos += n; remaining -= n; sector += priv->chunk_sectors; offset = 0; } while (remaining > 0); return count; fail: mutex_unlock(&priv->mutex); return res; } static ssize_t ps3flash_write(const char __user *userbuf, const void *kernelbuf, size_t count, loff_t *pos) { struct ps3_storage_device *dev = ps3flash_dev; struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); u64 size, sector, offset; int res = 0; size_t remaining, n; void *dst; dev_dbg(&dev->sbd.core, "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n", __func__, __LINE__, count, *pos, userbuf, kernelbuf); size = dev->regions[dev->region_idx].size*dev->blk_size; if (*pos >= size || !count) return 0; if (*pos + count > size) { dev_dbg(&dev->sbd.core, "%s:%u Truncating count from %zu to %llu\n", __func__, __LINE__, count, size - *pos); count = size - *pos; } sector = *pos / dev->bounce_size * priv->chunk_sectors; offset = *pos % dev->bounce_size; remaining = count; do { n = min_t(u64, remaining, dev->bounce_size - offset); dst = dev->bounce_buf + offset; mutex_lock(&priv->mutex); if (n != dev->bounce_size) res = ps3flash_fetch(dev, sector); else if (sector != priv->tag) res = ps3flash_writeback(dev); if (res) goto fail; dev_dbg(&dev->sbd.core, "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n", __func__, __LINE__, n, userbuf, kernelbuf, dst); if (userbuf) { if (copy_from_user(dst, userbuf, n)) { res = -EFAULT; goto fail; } userbuf += n; } if (kernelbuf) { memcpy(dst, kernelbuf, n); kernelbuf += n; } priv->tag = 
sector; priv->dirty = true; mutex_unlock(&priv->mutex); *pos += n; remaining -= n; sector += priv->chunk_sectors; offset = 0; } while (remaining > 0); return count; fail: mutex_unlock(&priv->mutex); return res; } static ssize_t ps3flash_user_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { return ps3flash_read(buf, NULL, count, pos); } static ssize_t ps3flash_user_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { return ps3flash_write(buf, NULL, count, pos); } static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos) { return ps3flash_read(NULL, buf, count, &pos); } static ssize_t ps3flash_kernel_write(const void *buf, size_t count, loff_t pos) { ssize_t res; int wb; res = ps3flash_write(NULL, buf, count, &pos); if (res < 0) return res; /* Make kernel writes synchronous */ wb = ps3flash_writeback(ps3flash_dev); if (wb) return wb; return res; } static int ps3flash_flush(struct file *file, fl_owner_t id) { return ps3flash_writeback(ps3flash_dev); } static int ps3flash_fsync(struct file *file, int datasync) { return ps3flash_writeback(ps3flash_dev); } static irqreturn_t ps3flash_interrupt(int irq, void *data) { struct ps3_storage_device *dev = data; int res; u64 tag, status; res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); if (tag != dev->tag) dev_err(&dev->sbd.core, "%s:%u: tag mismatch, got %llx, expected %llx\n", __func__, __LINE__, tag, dev->tag); if (res) { dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", __func__, __LINE__, res, status); } else { dev->lv1_status = status; complete(&dev->done); } return IRQ_HANDLED; } static const struct file_operations ps3flash_fops = { .owner = THIS_MODULE, .llseek = ps3flash_llseek, .read = ps3flash_user_read, .write = ps3flash_user_write, .flush = ps3flash_flush, .fsync = ps3flash_fsync, }; static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = { .read = ps3flash_kernel_read, .write = ps3flash_kernel_write, }; static struct 
miscdevice ps3flash_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DEVICE_NAME, .fops = &ps3flash_fops, }; static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); struct ps3flash_private *priv; int error; unsigned long tmp; tmp = dev->regions[dev->region_idx].start*dev->blk_size; if (tmp % FLASH_BLOCK_SIZE) { dev_err(&dev->sbd.core, "%s:%u region start %lu is not aligned\n", __func__, __LINE__, tmp); return -EINVAL; } tmp = dev->regions[dev->region_idx].size*dev->blk_size; if (tmp % FLASH_BLOCK_SIZE) { dev_err(&dev->sbd.core, "%s:%u region size %lu is not aligned\n", __func__, __LINE__, tmp); return -EINVAL; } /* use static buffer, kmalloc cannot allocate 256 KiB */ if (!ps3flash_bounce_buffer.address) return -ENODEV; if (ps3flash_dev) { dev_err(&dev->sbd.core, "Only one FLASH device is supported\n"); return -EBUSY; } ps3flash_dev = dev; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { error = -ENOMEM; goto fail; } ps3_system_bus_set_drvdata(&dev->sbd, priv); mutex_init(&priv->mutex); priv->tag = -1; dev->bounce_size = ps3flash_bounce_buffer.size; dev->bounce_buf = ps3flash_bounce_buffer.address; priv->chunk_sectors = dev->bounce_size / dev->blk_size; error = ps3stor_setup(dev, ps3flash_interrupt); if (error) goto fail_free_priv; ps3flash_misc.parent = &dev->sbd.core; error = misc_register(&ps3flash_misc); if (error) { dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n", __func__, __LINE__, error); goto fail_teardown; } dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n", __func__, __LINE__, ps3flash_misc.minor); ps3_os_area_flash_register(&ps3flash_kernel_ops); return 0; fail_teardown: ps3stor_teardown(dev); fail_free_priv: kfree(priv); ps3_system_bus_set_drvdata(&dev->sbd, NULL); fail: ps3flash_dev = NULL; return error; } static int ps3flash_remove(struct ps3_system_bus_device *_dev) { struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); 
ps3_os_area_flash_register(NULL); misc_deregister(&ps3flash_misc); ps3stor_teardown(dev); kfree(ps3_system_bus_get_drvdata(&dev->sbd)); ps3_system_bus_set_drvdata(&dev->sbd, NULL); ps3flash_dev = NULL; return 0; } static struct ps3_system_bus_driver ps3flash = { .match_id = PS3_MATCH_ID_STOR_FLASH, .core.name = DEVICE_NAME, .core.owner = THIS_MODULE, .probe = ps3flash_probe, .remove = ps3flash_remove, .shutdown = ps3flash_remove, }; static int __init ps3flash_init(void) { return ps3_system_bus_driver_register(&ps3flash); } static void __exit ps3flash_exit(void) { ps3_system_bus_driver_unregister(&ps3flash); } module_init(ps3flash_init); module_exit(ps3flash_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver"); MODULE_AUTHOR("Sony Corporation"); MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH);
gpl-2.0
hastalafiesta/android_kernel_lge_g3
drivers/gpu/drm/nouveau/nouveau_acpi.c
4011
11247
#include <linux/pci.h> #include <linux/acpi.h> #include <linux/slab.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> #include <acpi/video.h> #include <acpi/acpi.h> #include <linux/mxm-wmi.h> #include "drmP.h" #include "drm.h" #include "drm_sarea.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nv50_display.h" #include "nouveau_connector.h" #include <linux/vga_switcheroo.h> #define NOUVEAU_DSM_LED 0x02 #define NOUVEAU_DSM_LED_STATE 0x00 #define NOUVEAU_DSM_LED_OFF 0x10 #define NOUVEAU_DSM_LED_STAMINA 0x11 #define NOUVEAU_DSM_LED_SPEED 0x12 #define NOUVEAU_DSM_POWER 0x03 #define NOUVEAU_DSM_POWER_STATE 0x00 #define NOUVEAU_DSM_POWER_SPEED 0x01 #define NOUVEAU_DSM_POWER_STAMINA 0x02 #define NOUVEAU_DSM_OPTIMUS_FN 0x1A #define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001 static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; acpi_handle dhandle; acpi_handle rom_handle; } nouveau_dsm_priv; #define NOUVEAU_DSM_HAS_MUX 0x1 #define NOUVEAU_DSM_HAS_OPT 0x2 static const char nouveau_dsm_muid[] = { 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, }; static const char nouveau_op_dsm_muid[] = { 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, }; static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; union acpi_object params[4]; union acpi_object *obj; int i, err; char args_buff[4]; input.count = 4; input.pointer = params; params[0].type = ACPI_TYPE_BUFFER; params[0].buffer.length = sizeof(nouveau_op_dsm_muid); params[0].buffer.pointer = (char *)nouveau_op_dsm_muid; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 0x00000100; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; params[3].type = ACPI_TYPE_BUFFER; params[3].buffer.length = 4; /* ACPI is little endian, 
AABBCCDD becomes {DD,CC,BB,AA} */ for (i = 0; i < 4; i++) args_buff[i] = (arg >> i * 8) & 0xFF; params[3].buffer.pointer = args_buff; err = acpi_evaluate_object(handle, "_DSM", &input, &output); if (err) { printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); return err; } obj = (union acpi_object *)output.pointer; if (obj->type == ACPI_TYPE_INTEGER) if (obj->integer.value == 0x80000002) { return -ENODEV; } if (obj->type == ACPI_TYPE_BUFFER) { if (obj->buffer.length == 4 && result) { *result = 0; *result |= obj->buffer.pointer[0]; *result |= (obj->buffer.pointer[1] << 8); *result |= (obj->buffer.pointer[2] << 16); *result |= (obj->buffer.pointer[3] << 24); } } kfree(output.pointer); return 0; } static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; union acpi_object params[4]; union acpi_object *obj; int err; input.count = 4; input.pointer = params; params[0].type = ACPI_TYPE_BUFFER; params[0].buffer.length = sizeof(nouveau_dsm_muid); params[0].buffer.pointer = (char *)nouveau_dsm_muid; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 0x00000102; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; params[3].type = ACPI_TYPE_INTEGER; params[3].integer.value = arg; err = acpi_evaluate_object(handle, "_DSM", &input, &output); if (err) { printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); return err; } obj = (union acpi_object *)output.pointer; if (obj->type == ACPI_TYPE_INTEGER) if (obj->integer.value == 0x80000002) return -ENODEV; if (obj->type == ACPI_TYPE_BUFFER) { if (obj->buffer.length == 4 && result) { *result = 0; *result |= obj->buffer.pointer[0]; *result |= (obj->buffer.pointer[1] << 8); *result |= (obj->buffer.pointer[2] << 16); *result |= (obj->buffer.pointer[3] << 24); } } kfree(output.pointer); return 0; } /* Returns 1 if a DSM function is usable and 0 otherwise */ static int nouveau_test_dsm(acpi_handle 
test_handle, int (*dsm_func)(acpi_handle, int, int, uint32_t *), int sfnc) { u32 result = 0; /* Function 0 returns a Buffer containing available functions. The args * parameter is ignored for function 0, so just put 0 in it */ if (dsm_func(test_handle, 0, 0, &result)) return 0; /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If * the n-th bit is enabled, function n is supported */ return result & 1 && result & (1 << sfnc); } static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) { mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); mxm_wmi_call_mxds(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0); return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); } static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state) { int arg; if (state == VGA_SWITCHEROO_ON) arg = NOUVEAU_DSM_POWER_SPEED; else arg = NOUVEAU_DSM_POWER_STAMINA; nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL); return 0; } static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) { /* perhaps the _DSM functions are mutually exclusive, but prepare for * the future */ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; if (id == VGA_SWITCHEROO_IGD) return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); else return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_SPEED); } static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state) { if (id == VGA_SWITCHEROO_IGD) return 0; /* Optimus laptops have the card already disabled in * nouveau_switcheroo_set_state */ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) return 0; return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } static int nouveau_dsm_init(void) { return 0; } static int nouveau_dsm_get_client_id(struct pci_dev *pdev) { /* easy option one - 
intel vendor ID means Integrated */ if (pdev->vendor == PCI_VENDOR_ID_INTEL) return VGA_SWITCHEROO_IGD; /* is this device on Bus 0? - this may need improving */ if (pdev->bus->number == 0) return VGA_SWITCHEROO_IGD; return VGA_SWITCHEROO_DIS; } static struct vga_switcheroo_handler nouveau_dsm_handler = { .switchto = nouveau_dsm_switchto, .power_state = nouveau_dsm_power_state, .init = nouveau_dsm_init, .get_client_id = nouveau_dsm_get_client_id, }; static int nouveau_dsm_pci_probe(struct pci_dev *pdev) { acpi_handle dhandle, nvidia_handle; acpi_status status; int retval = 0; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); if (ACPI_FAILURE(status)) { return false; } if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) retval |= NOUVEAU_DSM_HAS_MUX; if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm, NOUVEAU_DSM_OPTIMUS_FN)) retval |= NOUVEAU_DSM_HAS_OPT; if (retval) nouveau_dsm_priv.dhandle = dhandle; return retval; } static bool nouveau_dsm_detect(void) { char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; int has_dsm = 0; int has_optimus = 0; int vga_count = 0; bool guid_valid; int retval; bool ret = false; /* lookup the MXM GUID */ guid_valid = mxm_wmi_supported(); if (guid_valid) printk("MXM: GUID detected in BIOS\n"); /* now do DSM detection */ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; retval = nouveau_dsm_pci_probe(pdev); if (retval & NOUVEAU_DSM_HAS_MUX) has_dsm |= 1; if (retval & NOUVEAU_DSM_HAS_OPT) has_optimus = 1; } if (vga_count == 2 && has_dsm && guid_valid) { acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", acpi_method_name); nouveau_dsm_priv.dsm_detected = true; ret = true; } if (has_optimus == 1) { 
acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); nouveau_dsm_priv.optimus_detected = true; ret = true; } return ret; } void nouveau_register_dsm_handler(void) { bool r; r = nouveau_dsm_detect(); if (!r) return; vga_switcheroo_register_handler(&nouveau_dsm_handler); } /* Must be called for Optimus models before the card can be turned off */ void nouveau_switcheroo_optimus_dsm(void) { u32 result = 0; if (!nouveau_dsm_priv.optimus_detected) return; nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN, NOUVEAU_DSM_OPTIMUS_ARGS, &result); } void nouveau_unregister_dsm_handler(void) { vga_switcheroo_unregister_handler(); } /* retrieve the ROM in 4k blocks */ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, int offset, int len) { acpi_status status; union acpi_object rom_arg_elements[2], *obj; struct acpi_object_list rom_arg; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; rom_arg.count = 2; rom_arg.pointer = &rom_arg_elements[0]; rom_arg_elements[0].type = ACPI_TYPE_INTEGER; rom_arg_elements[0].integer.value = offset; rom_arg_elements[1].type = ACPI_TYPE_INTEGER; rom_arg_elements[1].integer.value = len; status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer); if (ACPI_FAILURE(status)) { printk(KERN_INFO "failed to evaluate ROM got %s\n", acpi_format_exception(status)); return -ENODEV; } obj = (union acpi_object *)buffer.pointer; memcpy(bios+offset, obj->buffer.pointer, len); kfree(buffer.pointer); return len; } bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { acpi_status status; acpi_handle dhandle, rom_handle; if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) return false; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; status = acpi_get_handle(dhandle, "_ROM", &rom_handle); if (ACPI_FAILURE(status)) return false; nouveau_dsm_priv.rom_handle = 
rom_handle; return true; } int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); } int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { struct nouveau_connector *nv_connector = nouveau_connector(connector); struct acpi_device *acpidev; acpi_handle handle; int type, ret; void *edid; switch (connector->connector_type) { case DRM_MODE_CONNECTOR_LVDS: case DRM_MODE_CONNECTOR_eDP: type = ACPI_VIDEO_DISPLAY_LCD; break; default: return -EINVAL; } handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); if (!handle) return -ENODEV; ret = acpi_bus_get_device(handle, &acpidev); if (ret) return -ENODEV; ret = acpi_video_get_edid(acpidev, type, -1, &edid); if (ret < 0) return ret; nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); return 0; }
gpl-2.0
yu-validus/kernel_cyanogen_msm8916
arch/mips/txx9/generic/smsc_fdc37m81x.c
4267
4900
/* * Interface for smsc fdc48m81x Super IO chip * * Author: MontaVista Software, Inc. source@mvista.com * * 2001-2003 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. * * Copyright 2004 (c) MontaVista Software, Inc. */ #include <linux/init.h> #include <linux/types.h> #include <asm/io.h> #include <asm/txx9/smsc_fdc37m81x.h> /* Common Registers */ #define SMSC_FDC37M81X_CONFIG_INDEX 0x00 #define SMSC_FDC37M81X_CONFIG_DATA 0x01 #define SMSC_FDC37M81X_CONF 0x02 #define SMSC_FDC37M81X_INDEX 0x03 #define SMSC_FDC37M81X_DNUM 0x07 #define SMSC_FDC37M81X_DID 0x20 #define SMSC_FDC37M81X_DREV 0x21 #define SMSC_FDC37M81X_PCNT 0x22 #define SMSC_FDC37M81X_PMGT 0x23 #define SMSC_FDC37M81X_OSC 0x24 #define SMSC_FDC37M81X_CONFPA0 0x26 #define SMSC_FDC37M81X_CONFPA1 0x27 #define SMSC_FDC37M81X_TEST4 0x2B #define SMSC_FDC37M81X_TEST5 0x2C #define SMSC_FDC37M81X_TEST1 0x2D #define SMSC_FDC37M81X_TEST2 0x2E #define SMSC_FDC37M81X_TEST3 0x2F /* Logical device numbers */ #define SMSC_FDC37M81X_FDD 0x00 #define SMSC_FDC37M81X_SERIAL1 0x04 #define SMSC_FDC37M81X_SERIAL2 0x05 #define SMSC_FDC37M81X_KBD 0x07 /* Logical device Config Registers */ #define SMSC_FDC37M81X_ACTIVE 0x30 #define SMSC_FDC37M81X_BASEADDR0 0x60 #define SMSC_FDC37M81X_BASEADDR1 0x61 #define SMSC_FDC37M81X_INT 0x70 #define SMSC_FDC37M81X_INT2 0x72 #define SMSC_FDC37M81X_MODE 0xF0 /* Chip Config Values */ #define SMSC_FDC37M81X_CONFIG_ENTER 0x55 #define SMSC_FDC37M81X_CONFIG_EXIT 0xaa #define SMSC_FDC37M81X_CHIP_ID 0x4d static unsigned long g_smsc_fdc37m81x_base; static inline unsigned char smsc_fdc37m81x_rd(unsigned char index) { outb(index, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX); return inb(g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA); } static inline void smsc_dc37m81x_wr(unsigned char index, unsigned char data) { outb(index, 
g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX); outb(data, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_DATA); } void smsc_fdc37m81x_config_beg(void) { if (g_smsc_fdc37m81x_base) { outb(SMSC_FDC37M81X_CONFIG_ENTER, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX); } } void smsc_fdc37m81x_config_end(void) { if (g_smsc_fdc37m81x_base) outb(SMSC_FDC37M81X_CONFIG_EXIT, g_smsc_fdc37m81x_base + SMSC_FDC37M81X_CONFIG_INDEX); } u8 smsc_fdc37m81x_config_get(u8 reg) { u8 val = 0; if (g_smsc_fdc37m81x_base) val = smsc_fdc37m81x_rd(reg); return val; } void smsc_fdc37m81x_config_set(u8 reg, u8 val) { if (g_smsc_fdc37m81x_base) smsc_dc37m81x_wr(reg, val); } unsigned long __init smsc_fdc37m81x_init(unsigned long port) { const int field = sizeof(unsigned long) * 2; u8 chip_id; if (g_smsc_fdc37m81x_base) printk(KERN_WARNING "%s: stepping on old base=0x%0*lx\n", __func__, field, g_smsc_fdc37m81x_base); g_smsc_fdc37m81x_base = port; smsc_fdc37m81x_config_beg(); chip_id = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DID); if (chip_id == SMSC_FDC37M81X_CHIP_ID) smsc_fdc37m81x_config_end(); else { printk(KERN_WARNING "%s: unknown chip id 0x%02x\n", __func__, chip_id); g_smsc_fdc37m81x_base = 0; } return g_smsc_fdc37m81x_base; } #ifdef DEBUG static void smsc_fdc37m81x_config_dump_one(const char *key, u8 dev, u8 reg) { printk(KERN_INFO "%s: dev=0x%02x reg=0x%02x val=0x%02x\n", key, dev, reg, smsc_fdc37m81x_rd(reg)); } void smsc_fdc37m81x_config_dump(void) { u8 orig; const char *fname = __func__; smsc_fdc37m81x_config_beg(); orig = smsc_fdc37m81x_rd(SMSC_FDC37M81X_DNUM); printk(KERN_INFO "%s: common\n", fname); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE, SMSC_FDC37M81X_DNUM); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE, SMSC_FDC37M81X_DID); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE, SMSC_FDC37M81X_DREV); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE, SMSC_FDC37M81X_PCNT); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_NONE, 
SMSC_FDC37M81X_PMGT); printk(KERN_INFO "%s: keyboard\n", fname); smsc_dc37m81x_wr(SMSC_FDC37M81X_DNUM, SMSC_FDC37M81X_KBD); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD, SMSC_FDC37M81X_ACTIVE); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD, SMSC_FDC37M81X_INT); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD, SMSC_FDC37M81X_INT2); smsc_fdc37m81x_config_dump_one(fname, SMSC_FDC37M81X_KBD, SMSC_FDC37M81X_LDCR_F0); smsc_dc37m81x_wr(SMSC_FDC37M81X_DNUM, orig); smsc_fdc37m81x_config_end(); } #endif
gpl-2.0
nikez/android_kernel_htc_msm8660
arch/mips/ath79/mach-ap121.c
4779
2191
/* * Atheros AP121 board support * * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "machtypes.h" #include "dev-gpio-buttons.h" #include "dev-leds-gpio.h" #include "dev-spi.h" #include "dev-usb.h" #include "dev-wmac.h" #define AP121_GPIO_LED_WLAN 0 #define AP121_GPIO_LED_USB 1 #define AP121_GPIO_BTN_JUMPSTART 11 #define AP121_GPIO_BTN_RESET 12 #define AP121_KEYS_POLL_INTERVAL 20 /* msecs */ #define AP121_KEYS_DEBOUNCE_INTERVAL (3 * AP121_KEYS_POLL_INTERVAL) #define AP121_CAL_DATA_ADDR 0x1fff1000 static struct gpio_led ap121_leds_gpio[] __initdata = { { .name = "ap121:green:usb", .gpio = AP121_GPIO_LED_USB, .active_low = 0, }, { .name = "ap121:green:wlan", .gpio = AP121_GPIO_LED_WLAN, .active_low = 0, }, }; static struct gpio_keys_button ap121_gpio_keys[] __initdata = { { .desc = "jumpstart button", .type = EV_KEY, .code = KEY_WPS_BUTTON, .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL, .gpio = AP121_GPIO_BTN_JUMPSTART, .active_low = 1, }, { .desc = "reset button", .type = EV_KEY, .code = KEY_RESTART, .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL, .gpio = AP121_GPIO_BTN_RESET, .active_low = 1, } }; static struct spi_board_info ap121_spi_info[] = { { .bus_num = 0, .chip_select = 0, .max_speed_hz = 25000000, .modalias = "mx25l1606e", } }; static struct ath79_spi_platform_data ap121_spi_data = { .bus_num = 0, .num_chipselect = 1, }; static void __init ap121_setup(void) { u8 *cal_data = (u8 *) KSEG1ADDR(AP121_CAL_DATA_ADDR); ath79_register_leds_gpio(-1, ARRAY_SIZE(ap121_leds_gpio), ap121_leds_gpio); ath79_register_gpio_keys_polled(-1, AP121_KEYS_POLL_INTERVAL, ARRAY_SIZE(ap121_gpio_keys), ap121_gpio_keys); ath79_register_spi(&ap121_spi_data, ap121_spi_info, ARRAY_SIZE(ap121_spi_info)); ath79_register_usb(); ath79_register_wmac(cal_data); } 
MIPS_MACHINE(ATH79_MACH_AP121, "AP121", "Atheros AP121 reference board", ap121_setup);
gpl-2.0
D380/android_kernel_lge_msm8226
net/irda/irttp.c
5035
51777
/********************************************************************* * * Filename: irttp.c * Version: 1.2 * Description: Tiny Transport Protocol (TTP) implementation * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Sun Aug 31 20:14:31 1997 * Modified at: Wed Jan 5 11:31:27 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>, * All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ #include <linux/skbuff.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/export.h> #include <asm/byteorder.h> #include <asm/unaligned.h> #include <net/irda/irda.h> #include <net/irda/irlap.h> #include <net/irda/irlmp.h> #include <net/irda/parameters.h> #include <net/irda/irttp.h> static struct irttp_cb *irttp; static void __irttp_close_tsap(struct tsap_cb *self); static int irttp_data_indication(void *instance, void *sap, struct sk_buff *skb); static int irttp_udata_indication(void *instance, void *sap, struct sk_buff *skb); static void irttp_disconnect_indication(void *instance, void *sap, LM_REASON reason, struct sk_buff *); static void irttp_connect_indication(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 header_size, struct sk_buff *skb); static void irttp_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 header_size, struct sk_buff *skb); static void 
irttp_run_tx_queue(struct tsap_cb *self); static void irttp_run_rx_queue(struct tsap_cb *self); static void irttp_flush_queues(struct tsap_cb *self); static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb); static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self); static void irttp_todo_expired(unsigned long data); static int irttp_param_max_sdu_size(void *instance, irda_param_t *param, int get); static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow); static void irttp_status_indication(void *instance, LINK_STATUS link, LOCK_STATUS lock); /* Information for parsing parameters in IrTTP */ static pi_minor_info_t pi_minor_call_table[] = { { NULL, 0 }, /* 0x00 */ { irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */ }; static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }}; static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 }; /************************ GLOBAL PROCEDURES ************************/ /* * Function irttp_init (void) * * Initialize the IrTTP layer. Called by module initialization code * */ int __init irttp_init(void) { irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL); if (irttp == NULL) return -ENOMEM; irttp->magic = TTP_MAGIC; irttp->tsaps = hashbin_new(HB_LOCK); if (!irttp->tsaps) { IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n", __func__); kfree(irttp); return -ENOMEM; } return 0; } /* * Function irttp_cleanup (void) * * Called by module destruction/cleanup code * */ void irttp_cleanup(void) { /* Check for main structure */ IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;); /* * Delete hashbin and close all TSAP instances in it */ hashbin_delete(irttp->tsaps, (FREE_FUNC) __irttp_close_tsap); irttp->magic = 0; /* De-allocate main structure */ kfree(irttp); irttp = NULL; } /*************************** SUBROUTINES ***************************/ /* * Function irttp_start_todo_timer (self, timeout) * * Start todo timer. 
* * Made it more effient and unsensitive to race conditions - Jean II */ static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout) { /* Set new value for timer */ mod_timer(&self->todo_timer, jiffies + timeout); } /* * Function irttp_todo_expired (data) * * Todo timer has expired! * * One of the restriction of the timer is that it is run only on the timer * interrupt which run every 10ms. This mean that even if you set the timer * with a delay of 0, it may take up to 10ms before it's run. * So, to minimise latency and keep cache fresh, we try to avoid using * it as much as possible. * Note : we can't use tasklets, because they can't be asynchronously * killed (need user context), and we can't guarantee that here... * Jean II */ static void irttp_todo_expired(unsigned long data) { struct tsap_cb *self = (struct tsap_cb *) data; /* Check that we still exist */ if (!self || self->magic != TTP_TSAP_MAGIC) return; IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self); /* Try to make some progress, especially on Tx side - Jean II */ irttp_run_rx_queue(self); irttp_run_tx_queue(self); /* Check if time for disconnect */ if (test_bit(0, &self->disconnect_pend)) { /* Check if it's possible to disconnect yet */ if (skb_queue_empty(&self->tx_queue)) { /* Make sure disconnect is not pending anymore */ clear_bit(0, &self->disconnect_pend); /* FALSE */ /* Note : self->disconnect_skb may be NULL */ irttp_disconnect_request(self, self->disconnect_skb, P_NORMAL); self->disconnect_skb = NULL; } else { /* Try again later */ irttp_start_todo_timer(self, HZ/10); /* No reason to try and close now */ return; } } /* Check if it's closing time */ if (self->close_pend) /* Finish cleanup */ irttp_close_tsap(self); } /* * Function irttp_flush_queues (self) * * Flushes (removes all frames) in transitt-buffer (tx_list) */ static void irttp_flush_queues(struct tsap_cb *self) { struct sk_buff* skb; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); 
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); /* Deallocate frames waiting to be sent */ while ((skb = skb_dequeue(&self->tx_queue)) != NULL) dev_kfree_skb(skb); /* Deallocate received frames */ while ((skb = skb_dequeue(&self->rx_queue)) != NULL) dev_kfree_skb(skb); /* Deallocate received fragments */ while ((skb = skb_dequeue(&self->rx_fragments)) != NULL) dev_kfree_skb(skb); } /* * Function irttp_reassemble (self) * * Makes a new (continuous) skb of all the fragments in the fragment * queue * */ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self) { struct sk_buff *skb, *frag; int n = 0; /* Fragment index */ IRDA_ASSERT(self != NULL, return NULL;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;); IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__, self->rx_sdu_size); skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size); if (!skb) return NULL; /* * Need to reserve space for TTP header in case this skb needs to * be requeued in case delivery failes */ skb_reserve(skb, TTP_HEADER); skb_put(skb, self->rx_sdu_size); /* * Copy all fragments to a new buffer */ while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) { skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len); n += frag->len; dev_kfree_skb(frag); } IRDA_DEBUG(2, "%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n", __func__, n, self->rx_sdu_size, self->rx_max_sdu_size); /* Note : irttp_run_rx_queue() calculate self->rx_sdu_size * by summing the size of all fragments, so we should always * have n == self->rx_sdu_size, except in cases where we * droped the last fragment (when self->rx_sdu_size exceed * self->rx_max_sdu_size), where n < self->rx_sdu_size. 
* Jean II */ IRDA_ASSERT(n <= self->rx_sdu_size, n = self->rx_sdu_size;); /* Set the new length */ skb_trim(skb, n); self->rx_sdu_size = 0; return skb; } /* * Function irttp_fragment_skb (skb) * * Fragments a frame and queues all the fragments for transmission * */ static inline void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb) { struct sk_buff *frag; __u8 *frame; IRDA_DEBUG(2, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); /* * Split frame into a number of segments */ while (skb->len > self->max_seg_size) { IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__); /* Make new segment */ frag = alloc_skb(self->max_seg_size+self->max_header_size, GFP_ATOMIC); if (!frag) return; skb_reserve(frag, self->max_header_size); /* Copy data from the original skb into this fragment. */ skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size), self->max_seg_size); /* Insert TTP header, with the more bit set */ frame = skb_push(frag, TTP_HEADER); frame[0] = TTP_MORE; /* Hide the copied data from the original skb */ skb_pull(skb, self->max_seg_size); /* Queue fragment */ skb_queue_tail(&self->tx_queue, frag); } /* Queue what is left of the original skb */ IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__); frame = skb_push(skb, TTP_HEADER); frame[0] = 0x00; /* Clear more bit */ /* Queue fragment */ skb_queue_tail(&self->tx_queue, skb); } /* * Function irttp_param_max_sdu_size (self, param) * * Handle the MaxSduSize parameter in the connect frames, this function * will be called both when this parameter needs to be inserted into, and * extracted from the connect frames */ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param, int get) { struct tsap_cb *self; self = instance; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); if (get) param->pv.i = self->tx_max_sdu_size; else self->tx_max_sdu_size = 
param->pv.i; IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i); return 0; } /*************************** CLIENT CALLS ***************************/ /************************** LMP CALLBACKS **************************/ /* Everything is happily mixed up. Waiting for next clean up - Jean II */ /* * Initialization, that has to be done on new tsap * instance allocation and on duplication */ static void irttp_init_tsap(struct tsap_cb *tsap) { spin_lock_init(&tsap->lock); init_timer(&tsap->todo_timer); skb_queue_head_init(&tsap->rx_queue); skb_queue_head_init(&tsap->tx_queue); skb_queue_head_init(&tsap->rx_fragments); } /* * Function irttp_open_tsap (stsap, notify) * * Create TSAP connection endpoint, */ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) { struct tsap_cb *self; struct lsap_cb *lsap; notify_t ttp_notify; IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;); /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to * use only 0x01-0x6F. Of course, we can use LSAP_ANY as well. 
* JeanII */ if((stsap_sel != LSAP_ANY) && ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) { IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__); return NULL; } self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); if (self == NULL) { IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__); return NULL; } /* Initialize internal objects */ irttp_init_tsap(self); /* Initialise todo timer */ self->todo_timer.data = (unsigned long) self; self->todo_timer.function = &irttp_todo_expired; /* Initialize callbacks for IrLMP to use */ irda_notify_init(&ttp_notify); ttp_notify.connect_confirm = irttp_connect_confirm; ttp_notify.connect_indication = irttp_connect_indication; ttp_notify.disconnect_indication = irttp_disconnect_indication; ttp_notify.data_indication = irttp_data_indication; ttp_notify.udata_indication = irttp_udata_indication; ttp_notify.flow_indication = irttp_flow_indication; if(notify->status_indication != NULL) ttp_notify.status_indication = irttp_status_indication; ttp_notify.instance = self; strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME); self->magic = TTP_TSAP_MAGIC; self->connected = FALSE; /* * Create LSAP at IrLMP layer */ lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); if (lsap == NULL) { IRDA_WARNING("%s: unable to allocate LSAP!!\n", __func__); return NULL; } /* * If user specified LSAP_ANY as source TSAP selector, then IrLMP * will replace it with whatever source selector which is free, so * the stsap_sel we have might not be valid anymore */ self->stsap_sel = lsap->slsap_sel; IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel); self->notify = *notify; self->lsap = lsap; hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL); if (credit > TTP_RX_MAX_CREDIT) self->initial_credit = TTP_RX_MAX_CREDIT; else self->initial_credit = credit; return self; } EXPORT_SYMBOL(irttp_open_tsap); /* * Function irttp_close (handle) * * Remove an instance of a TSAP. 
This function should only deal with the * deallocation of the TSAP, and resetting of the TSAPs values; * */ static void __irttp_close_tsap(struct tsap_cb *self) { /* First make sure we're connected. */ IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); irttp_flush_queues(self); del_timer(&self->todo_timer); /* This one won't be cleaned up if we are disconnect_pend + close_pend * and we receive a disconnect_indication */ if (self->disconnect_skb) dev_kfree_skb(self->disconnect_skb); self->connected = FALSE; self->magic = ~TTP_TSAP_MAGIC; kfree(self); } /* * Function irttp_close (self) * * Remove TSAP from list of all TSAPs and then deallocate all resources * associated with this TSAP * * Note : because we *free* the tsap structure, it is the responsibility * of the caller to make sure we are called only once and to deal with * possible race conditions. - Jean II */ int irttp_close_tsap(struct tsap_cb *self) { struct tsap_cb *tsap; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); /* Make sure tsap has been disconnected */ if (self->connected) { /* Check if disconnect is not pending */ if (!test_bit(0, &self->disconnect_pend)) { IRDA_WARNING("%s: TSAP still connected!\n", __func__); irttp_disconnect_request(self, NULL, P_NORMAL); } self->close_pend = TRUE; irttp_start_todo_timer(self, HZ/10); return 0; /* Will be back! 
*/ } tsap = hashbin_remove(irttp->tsaps, (long) self, NULL); IRDA_ASSERT(tsap == self, return -1;); /* Close corresponding LSAP */ if (self->lsap) { irlmp_close_lsap(self->lsap); self->lsap = NULL; } __irttp_close_tsap(self); return 0; } EXPORT_SYMBOL(irttp_close_tsap); /* * Function irttp_udata_request (self, skb) * * Send unreliable data on this TSAP * */ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) { int ret; IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); IRDA_DEBUG(4, "%s()\n", __func__); /* Take shortcut on zero byte packets */ if (skb->len == 0) { ret = 0; goto err; } /* Check that nothing bad happens */ if (!self->connected) { IRDA_WARNING("%s(), Not connected\n", __func__); ret = -ENOTCONN; goto err; } if (skb->len > self->max_seg_size) { IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__); ret = -EMSGSIZE; goto err; } irlmp_udata_request(self->lsap, skb); self->stats.tx_packets++; return 0; err: dev_kfree_skb(skb); return ret; } EXPORT_SYMBOL(irttp_udata_request); /* * Function irttp_data_request (handle, skb) * * Queue frame for transmission. 
 If SAR is enabled, fragement the frame
 *    and queue the fragments for transmission
 */
int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
{
	__u8 *frame;
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
		   skb_queue_len(&self->tx_queue));

	/* Take shortcut on zero byte packets */
	if (skb->len == 0) {
		ret = 0;
		goto err;
	}

	/* Check that nothing bad happens */
	if (!self->connected) {
		IRDA_WARNING("%s: Not connected\n", __func__);
		ret = -ENOTCONN;
		goto err;
	}

	/*
	 *  Check if SAR is disabled, and the frame is larger than what fits
	 *  inside an IrLAP frame
	 */
	if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) {
		IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}

	/*
	 *  Check if SAR is enabled, and the frame is larger than the
	 *  TxMaxSduSize
	 */
	if ((self->tx_max_sdu_size != 0) &&
	    (self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
	    (skb->len > self->tx_max_sdu_size)) {
		IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
			   __func__);
		ret = -EMSGSIZE;
		goto err;
	}
	/*
	 *  Check if transmit queue is full
	 */
	if (skb_queue_len(&self->tx_queue) >= TTP_TX_MAX_QUEUE) {
		/*
		 *  Give it a chance to empty itself
		 */
		irttp_run_tx_queue(self);

		/* Drop packet. This error code should trigger the caller
		 * to resend the data in the client code - Jean II */
		ret = -ENOBUFS;
		goto err;
	}

	/* Queue frame, or queue frame segments */
	/* NOTE(review): strict '<' means an skb of exactly max_seg_size with
	 * SAR enabled takes the fragmentation path below; the while-loop in
	 * irttp_fragment_skb() then queues it whole, so the outcome is the
	 * same, just via a longer path. */
	if ((self->tx_max_sdu_size == 0) || (skb->len < self->max_seg_size)) {
		/* Queue frame */
		IRDA_ASSERT(skb_headroom(skb) >= TTP_HEADER, return -1;);
		frame = skb_push(skb, TTP_HEADER);
		frame[0] = 0x00; /* Clear more bit */

		skb_queue_tail(&self->tx_queue, skb);
	} else {
		/*
		 *  Fragment the frame, this function will also queue the
		 *  fragments, we don't care about the fact the transmit
		 *  queue may be overfilled by all the segments for a little
		 *  while
		 */
		irttp_fragment_skb(self, skb);
	}

	/* Check if we can accept more data from client */
	if ((!self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) > TTP_TX_HIGH_THRESHOLD)) {
		/* Tx queue filling up, so stop client. */
		if (self->notify.flow_indication) {
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_STOP);
		}
		/* self->tx_sdu_busy is the state of the client.
		 * Update state after notifying client to avoid
		 * race condition with irttp_flow_indication().
		 * If the queue empty itself after our test but before
		 * we set the flag, we will fix ourselves below in
		 * irttp_run_tx_queue().
		 * Jean II */
		self->tx_sdu_busy = TRUE;
	}

	/* Try to make some progress */
	irttp_run_tx_queue(self);

	return 0;

err:
	dev_kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(irttp_data_request);

/*
 * Function irttp_run_tx_queue (self)
 *
 *    Transmit packets queued for transmission (if possible)
 *
 *    Drains self->tx_queue towards IrLMP while send credit remains and
 *    the LAP queue has room; piggybacks up to 127 returned rx credits
 *    into the low 7 bits of each frame's TTP header byte.
 */
static void irttp_run_tx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	unsigned long flags;
	int n;

	IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n",
		   __func__,
		   self->send_credit,
		   skb_queue_len(&self->tx_queue));

	/* Get exclusive access to the tx queue, otherwise don't touch it */
	if (irda_lock(&self->tx_queue_lock) == FALSE)
		return;

	/* Try to send out frames as long as we have credits
	 * and as long as LAP is not full. If LAP is full, it will
	 * poll us through irttp_flow_indication() - Jean II */
	while ((self->send_credit > 0) &&
	       (!irlmp_lap_tx_queue_full(self->lsap)) &&
	       (skb = skb_dequeue(&self->tx_queue))) {
		/*
		 *  Since we can transmit and receive frames concurrently,
		 *  the code below is a critical region and we must assure that
		 *  nobody messes with the credits while we update them.
		 */
		spin_lock_irqsave(&self->lock, flags);

		n = self->avail_credit;
		self->avail_credit = 0;

		/* Only room for 127 credits in frame */
		if (n > 127) {
			self->avail_credit = n-127;
			n = 127;
		}
		self->remote_credit += n;
		self->send_credit--;

		spin_unlock_irqrestore(&self->lock, flags);

		/*
		 *  More bit must be set by the data_request() or fragment()
		 *  functions
		 */
		skb->data[0] |= (n & 0x7f);

		/* Detach from socket.
		 * The current skb has a reference to the socket that sent
		 * it (skb->sk). When we pass it to IrLMP, the skb will be
		 * stored in in IrLAP (self->wx_list). When we are within
		 * IrLAP, we lose the notion of socket, so we should not
		 * have a reference to a socket. So, we drop it here.
		 *
		 * Why does it matter ?
		 * When the skb is freed (kfree_skb), if it is associated
		 * with a socket, it release buffer space on the socket
		 * (through sock_wfree() and sock_def_write_space()).
		 * If the socket no longer exist, we may crash. Hard.
		 * When we close a socket, we make sure that associated packets
		 * in IrTTP are freed. However, we have no way to cancel
		 * the packet that we have passed to IrLAP. So, if a packet
		 * remains in IrLAP (retry on the link or else) after we
		 * close the socket, we are dead !
		 * Jean II */
		if (skb->sk != NULL) {
			/* IrSOCK application, IrOBEX, ... */
			skb_orphan(skb);
		}
		/* IrCOMM over IrTTP, IrLAN, ... */

		/* Pass the skb to IrLMP - done */
		irlmp_data_request(self->lsap, skb);
		self->stats.tx_packets++;
	}

	/* Check if we can accept more frames from client.
	 * We don't want to wait until the todo timer to do that, and we
	 * can't use tasklets (grr...), so we are obliged to give control
	 * to client. That's ok, this test will be true not too often
	 * (max once per LAP window) and we are called from places
	 * where we can spend a bit of time doing stuff. - Jean II */
	if ((self->tx_sdu_busy) &&
	    (skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
	    (!self->close_pend)) {
		if (self->notify.flow_indication)
			self->notify.flow_indication(self->notify.instance,
						     self, FLOW_START);

		/* self->tx_sdu_busy is the state of the client.
		 * We don't really have a race here, but it's always safer
		 * to update our state after the client - Jean II */
		self->tx_sdu_busy = FALSE;
	}

	/* Reset lock */
	self->tx_queue_lock = 0;
}

/*
 * Function irttp_give_credit (self)
 *
 *    Send a dataless flowdata TTP-PDU and give available credit to peer
 *    TSAP
 */
static inline void irttp_give_credit(struct tsap_cb *self)
{
	struct sk_buff *tx_skb = NULL;
	unsigned long flags;
	int n;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n",
		   __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Give credit to peer */
	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
	if (!tx_skb)
		return;

	/* Reserve space for LMP, and LAP header */
	skb_reserve(tx_skb, LMP_MAX_HEADER);

	/*
	 *  Since we can transmit and receive frames concurrently,
	 *  the code below is a critical region and we must assure that
	 *  nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);

	n = self->avail_credit;
	self->avail_credit = 0;

	/* Only space for 127 credits in frame */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit += n;

	spin_unlock_irqrestore(&self->lock, flags);

	skb_put(tx_skb, 1);
	tx_skb->data[0] = (__u8) (n & 0x7f);

	irlmp_data_request(self->lsap, tx_skb);
	self->stats.tx_packets++;
}

/*
 * Function irttp_udata_indication (instance, sap, skb)
 *
 *    Received some unit-data (unreliable)
 *
 */
static int irttp_udata_indication(void *instance, void *sap,
				  struct sk_buff *skb)
{
	struct tsap_cb *self;
	int err;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
	IRDA_ASSERT(skb != NULL, return -1;);

	self->stats.rx_packets++;

	/* Just pass data to layer above */
	if (self->notify.udata_indication) {
		err = self->notify.udata_indication(self->notify.instance,
						    self,skb);
		/* Same comment as in irttp_do_data_indication() */
		if (!err)
			return 0;
	}
	/* Either no handler, or handler returns an error */
	dev_kfree_skb(skb);

	return 0;
}

/*
 * Function irttp_data_indication (instance, sap, skb)
 *
 *    Receive segment from IrLMP.
 *
 *    Extracts the piggybacked credits from the TTP header, queues data
 *    frames for reassembly and drops dataless flow-control PDUs.
 *
 *    NOTE(review): unlike the other LMP callbacks, there is no NULL/magic
 *    assert on 'instance' before use here — presumably IrLMP always hands
 *    us a valid tsap; confirm against the callers in irlmp.
 */
static int irttp_data_indication(void *instance, void *sap,
				 struct sk_buff *skb)
{
	struct tsap_cb *self;
	unsigned long flags;
	int n;

	self = instance;

	n = skb->data[0] & 0x7f;     /* Extract the credits */

	self->stats.rx_packets++;

	/*  Deal with inbound credit
	 *  Since we can transmit and receive frames concurrently,
	 *  the code below is a critical region and we must assure that
	 *  nobody messes with the credits while we update them.
	 */
	spin_lock_irqsave(&self->lock, flags);
	self->send_credit += n;
	if (skb->len > 1)
		self->remote_credit--;
	spin_unlock_irqrestore(&self->lock, flags);

	/*
	 *  Data or dataless packet? Dataless frames contains only the
	 *  TTP_HEADER.
	 */
	if (skb->len > 1) {
		/*
		 *  We don't remove the TTP header, since we must preserve the
		 *  more bit, so the defragment routing knows what to do
		 */
		skb_queue_tail(&self->rx_queue, skb);
	} else {
		/* Dataless flowdata TTP-PDU */
		dev_kfree_skb(skb);
	}

	/* Push data to the higher layer.
	 * We do it synchronously because running the todo timer for each
	 * receive packet would be too much overhead and latency.
	 * By passing control to the higher layer, we run the risk that
	 * it may take time or grab a lock. Most often, the higher layer
	 * will only put packet in a queue.
	 * Anyway, packets are only dripping through the IrDA, so we can
	 * have time before the next packet.
	 * Further, we are run from NET_BH, so the worse that can happen is
	 * us missing the optimal time to send back the PF bit in LAP.
	 * Jean II */
	irttp_run_rx_queue(self);

	/* We now give credits to peer in irttp_run_rx_queue().
	 * We need to send credit *NOW*, otherwise we are going
	 * to miss the next Tx window. The todo timer may take
	 * a while before it's run... - Jean II */

	/*
	 * If the peer device has given us some credits and we didn't have
	 * anyone from before, then we need to shedule the tx queue.
	 * We need to do that because our Tx have stopped (so we may not
	 * get any LAP flow indication) and the user may be stopped as
	 * well. - Jean II
	 */
	if (self->send_credit == n) {
		/* Restart pushing stuff to LAP */
		irttp_run_tx_queue(self);
		/* Note : we don't want to schedule the todo timer
		 * because it has horrible latency. No tasklets
		 * because the tasklet API is broken. - Jean II */
	}

	return 0;
}

/*
 * Function irttp_status_indication (self, reason)
 *
 *    Status_indication, just pass to the higher layer...
 *
 */
static void irttp_status_indication(void *instance,
				    LINK_STATUS link, LOCK_STATUS lock)
{
	struct tsap_cb *self;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Check if client has already closed the TSAP and gone away */
	if (self->close_pend)
		return;

	/*
	 *  Inform service user if he has requested it
	 */
	if (self->notify.status_indication != NULL)
		self->notify.status_indication(self->notify.instance,
					       link, lock);
	else
		IRDA_DEBUG(2, "%s(), no handler\n", __func__);
}

/*
 * Function irttp_flow_indication (self, reason)
 *
 *    Flow_indication : IrLAP tells us to send more data.
 *
 */
static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
{
	struct tsap_cb *self;

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self);

	/* We are "polled" directly from LAP, and the LAP want to fill
	 * its Tx window. We want to do our best to send it data, so that
	 * we maximise the window. On the other hand, we want to limit the
	 * amount of work here so that LAP doesn't hang forever waiting
	 * for packets. - Jean II */

	/* Try to send some packets. Currently, LAP calls us every time
	 * there is one free slot, so we will send only one packet.
	 * This allow the scheduler to do its round robin - Jean II */
	irttp_run_tx_queue(self);

	/* Note regarding the interraction with higher layer.
	 * irttp_run_tx_queue() may call the client when its queue
	 * start to empty, via notify.flow_indication(). Initially.
	 * I wanted this to happen in a tasklet, to avoid client
	 * grabbing the CPU, but we can't use tasklets safely. And timer
	 * is definitely too slow.
	 * This will happen only once per LAP window, and usually at
	 * the third packet (unless window is smaller). LAP is still
	 * doing mtt and sending first packet so it's sort of OK
	 * to do that. Jean II */

	/* If we need to send disconnect. try to do it now */
	if(self->disconnect_pend)
		irttp_start_todo_timer(self, 0);
}

/*
 * Function irttp_flow_request (self, command)
 *
 *    This function could be used by the upper layers to tell IrTTP to stop
 *    delivering frames if the receive queues are starting to get full, or
 *    to tell IrTTP to start delivering frames again.
 */
void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow)
{
	IRDA_DEBUG(1, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	switch (flow) {
	case FLOW_STOP:
		IRDA_DEBUG(1, "%s(), flow stop\n", __func__);
		self->rx_sdu_busy = TRUE;
		break;
	case FLOW_START:
		IRDA_DEBUG(1, "%s(), flow start\n", __func__);
		self->rx_sdu_busy = FALSE;

		/* Client say he can accept more data, try to free our
		 * queues ASAP - Jean II */
		irttp_run_rx_queue(self);
		break;
	default:
		IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__);
	}
}
EXPORT_SYMBOL(irttp_flow_request);

/*
 * Function irttp_connect_request (self, dtsap_sel, daddr, qos)
 *
 *    Try to connect to remote destination TSAP selector
 *
 */
int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
			  __u32 saddr, __u32 daddr,
			  struct qos_info *qos, __u32 max_sdu_size,
			  struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	__u8 n;

	IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size);

	IRDA_ASSERT(self != NULL, return -EBADR;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);

	if (self->connected) {
		if(userdata)
			dev_kfree_skb(userdata);
		return -EISCONN;
	}

	/* Any userdata supplied?
	 */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	/* Initialize connection parameters */
	self->connected = FALSE;
	self->avail_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;
	self->dtsap_sel = dtsap_sel;

	n = self->initial_credit;

	self->remote_credit = 0;
	self->send_credit = 0;

	/*
	 *  Give away max 127 credits for now
	 */
	if (n > 127) {
		self->avail_credit=n-127;
		n = 127;
	}

	self->remote_credit = n;

	/* SAR enabled? */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */
		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert plain TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		/* Insert initial credit in frame */
		frame[0] = n & 0x7f;
	}

	/* Connect with IrLMP. No QoS parameters for now */
	return irlmp_connect_request(self->lsap, dtsap_sel, saddr, daddr, qos,
				     tx_skb);
}
EXPORT_SYMBOL(irttp_connect_request);

/*
 * Function irttp_connect_confirm (handle, qos, skb)
 *
 *    Service user confirms TSAP connection with peer.
 *
 *    Records the negotiated segment/header sizes, extracts the peer's
 *    initial credit and optional MaxSduSize parameter from the connect
 *    frame, then passes the confirmation up to the client.
 */
static void irttp_connect_confirm(void *instance, void *sap,
				  struct qos_info *qos, __u32 max_seg_size,
				  __u8 max_header_size, struct sk_buff *skb)
{
	struct tsap_cb *self;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size + TTP_HEADER;

	/*
	 *  Check if we have got some QoS parameters back! This should be the
	 *  negotiated QoS for the link.
	 */
	if (qos) {
		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n",
		       qos->baud_rate.bits);

		IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n",
		       qos->baud_rate.value);
	}

	n = skb->data[0] & 0x7f;

	IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n);

	self->send_credit = n;
	self->tx_max_sdu_size = 0;
	self->connected = TRUE;

	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}
		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__,
	      self->send_credit, self->avail_credit, self->remote_credit);

	IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__,
		   self->tx_max_sdu_size);

	if (self->notify.connect_confirm) {
		self->notify.connect_confirm(self->notify.instance, self, qos,
					     self->tx_max_sdu_size,
					     self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}

/*
 * Function irttp_connect_indication (handle, skb)
 *
 *    Some other device is connecting to this TSAP
 *
 */
static void irttp_connect_indication(void *instance, void *sap,
		struct qos_info *qos, __u32 max_seg_size, __u8 max_header_size,
		struct sk_buff *skb)
{
	struct tsap_cb *self;
	struct lsap_cb *lsap;
	int parameters;
	int ret;
	__u8 plen;
	__u8 n;

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	lsap = sap;

	self->max_seg_size = max_seg_size - TTP_HEADER;
	self->max_header_size = max_header_size+TTP_HEADER;

	IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel);

	/* Need to update dtsap_sel if its equal to LSAP_ANY */
	self->dtsap_sel = lsap->dlsap_sel;

	n = skb->data[0] & 0x7f;

	self->send_credit = n;
	self->tx_max_sdu_size = 0;

	parameters = skb->data[0] & 0x80;

	IRDA_ASSERT(skb->len >= TTP_HEADER, return;);
	skb_pull(skb, TTP_HEADER);

	if (parameters) {
		plen = skb->data[0];

		ret = irda_param_extract_all(self, skb->data+1,
					     IRDA_MIN(skb->len-1, plen),
					     &param_info);

		/* Any errors in the parameter list? */
		if (ret < 0) {
			IRDA_WARNING("%s: error extracting parameters\n",
				     __func__);
			dev_kfree_skb(skb);

			/* Do not accept this connection attempt */
			return;
		}

		/* Remove parameters */
		skb_pull(skb, IRDA_MIN(skb->len, plen+1));
	}

	if (self->notify.connect_indication) {
		self->notify.connect_indication(self->notify.instance, self,
						qos, self->tx_max_sdu_size,
						self->max_header_size, skb);
	} else
		dev_kfree_skb(skb);
}

/*
 * Function irttp_connect_response (handle, userdata)
 *
 *    Service user is accepting the connection, just pass it down to
 *    IrLMP!
 *
 */
int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
			   struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	__u8 *frame;
	int ret;
	__u8 n;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__,
		   self->stsap_sel);

	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER,
				   GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX_CONTROL and LAP header */
		skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
	} else {
		tx_skb = userdata;
		/*
		 *  Check that the client has reserved enough space for
		 *  headers
		 */
		IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
			{ dev_kfree_skb(userdata); return -1; } );
	}

	self->avail_credit = 0;
	self->remote_credit = 0;
	self->rx_max_sdu_size = max_sdu_size;
	self->rx_sdu_size = 0;
	self->rx_sdu_busy = FALSE;

	n = self->initial_credit;

	/* Frame has only space for max 127 credits (7 bits) */
	if (n > 127) {
		self->avail_credit = n - 127;
		n = 127;
	}
	self->remote_credit = n;
	self->connected = TRUE;

	/* SAR enabled?
	 */
	if (max_sdu_size > 0) {
		IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
			{ dev_kfree_skb(tx_skb); return -1; } );

		/* Insert TTP header with SAR parameters */
		frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);

		frame[0] = TTP_PARAMETERS | n;
		frame[1] = 0x04; /* Length */

		/* irda_param_insert(self, IRTTP_MAX_SDU_SIZE, frame+1, */
/*				  TTP_SAR_HEADER, &param_info) */

		frame[2] = 0x01; /* MaxSduSize */
		frame[3] = 0x02; /* Value length */

		put_unaligned(cpu_to_be16((__u16) max_sdu_size),
			      (__be16 *)(frame+4));
	} else {
		/* Insert TTP header */
		frame = skb_push(tx_skb, TTP_HEADER);

		frame[0] = n & 0x7f;
	}

	ret = irlmp_connect_response(self->lsap, tx_skb);

	return ret;
}
EXPORT_SYMBOL(irttp_connect_response);

/*
 * Function irttp_dup (self, instance)
 *
 *    Duplicate TSAP, can be used by servers to confirm a connection on a
 *    new TSAP so it can keep listening on the old one.
 *
 *    NOTE(review): kmemdup() copies orig's todo_timer wholesale, and
 *    irttp_init_tsap() below only calls init_timer() without setting
 *    .function/.data — so the duplicate's timer callback data appears to
 *    still reference the *original* tsap.  Confirm whether any path can
 *    start the todo timer on a dup'ed tsap before these fields are reset.
 */
struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
{
	struct tsap_cb *new;
	unsigned long flags;

	IRDA_DEBUG(1, "%s()\n", __func__);

	/* Protect our access to the old tsap instance */
	spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);

	/* Find the old instance */
	if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) {
		IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__);
		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
		return NULL;
	}

	/* Allocate a new instance */
	new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC);
	if (!new) {
		IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__);
		spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
		return NULL;
	}
	spin_lock_init(&new->lock);

	/* We don't need the old instance any more */
	spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);

	/* Try to dup the LSAP (may fail if we were too slow) */
	new->lsap = irlmp_dup(orig->lsap, new);
	if (!new->lsap) {
		IRDA_DEBUG(0, "%s(), dup failed!\n", __func__);
		kfree(new);
		return NULL;
	}

	/* Not everything should be copied */
	new->notify.instance = instance;

	/* Initialize internal objects */
	irttp_init_tsap(new);

	/* This is locked */
	hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);

	return new;
}
EXPORT_SYMBOL(irttp_dup);

/*
 * Function irttp_disconnect_request (self)
 *
 *    Close this connection please! If priority is high, the queued data
 *    segments, if any, will be deallocated first
 *
 */
int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
			     int priority)
{
	int ret;

	IRDA_ASSERT(self != NULL, return -1;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);

	/* Already disconnected? */
	if (!self->connected) {
		IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__);
		if (userdata)
			dev_kfree_skb(userdata);
		return -1;
	}

	/* Disconnect already pending ?
	 * We need to use an atomic operation to prevent reentry. This
	 * function may be called from various context, like user, timer
	 * for following a disconnect_indication() (i.e. net_bh).
	 * Jean II */
	if(test_and_set_bit(0, &self->disconnect_pend)) {
		IRDA_DEBUG(0, "%s(), disconnect already pending\n",
			   __func__);
		if (userdata)
			dev_kfree_skb(userdata);

		/* Try to make some progress */
		irttp_run_tx_queue(self);
		return -1;
	}

	/*
	 *  Check if there is still data segments in the transmit queue
	 */
	if (!skb_queue_empty(&self->tx_queue)) {
		if (priority == P_HIGH) {
			/*
			 *  No need to send the queued data, if we are
			 *  disconnecting right now since the data will
			 *  not have any usable connection to be sent on
			 */
			IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__);
			irttp_flush_queues(self);
		} else if (priority == P_NORMAL) {
			/*
			 *  Must delay disconnect until after all data segments
			 *  have been sent and the tx_queue is empty
			 */
			/* We'll reuse this one later for the disconnect */
			self->disconnect_skb = userdata;  /* May be NULL */

			irttp_run_tx_queue(self);

			irttp_start_todo_timer(self, HZ/10);
			return -1;
		}
	}
	/* Note : we don't need to check if self->rx_queue is full and the
	 * state of self->rx_sdu_busy because the disconnect response will
	 * be sent at the LMP level (so even if the peer has its Tx queue
	 * full of data). - Jean II */

	IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__);
	self->connected = FALSE;

	if (!userdata) {
		struct sk_buff *tx_skb;
		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
		if (!tx_skb)
			return -ENOMEM;

		/*
		 *  Reserve space for MUX and LAP header
		 */
		skb_reserve(tx_skb, LMP_MAX_HEADER);

		userdata = tx_skb;
	}
	ret = irlmp_disconnect_request(self->lsap, userdata);

	/* The disconnect is no longer pending */
	clear_bit(0, &self->disconnect_pend);	/* FALSE */

	return ret;
}
EXPORT_SYMBOL(irttp_disconnect_request);

/*
 * Function irttp_disconnect_indication (self, reason)
 *
 *    Disconnect indication, TSAP disconnected by peer?
 *
 */
static void irttp_disconnect_indication(void *instance, void *sap,
		LM_REASON reason, struct sk_buff *skb)
{
	struct tsap_cb *self;

	IRDA_DEBUG(4, "%s()\n", __func__);

	self = instance;

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);

	/* Prevent higher layer to send more data */
	self->connected = FALSE;

	/* Check if client has already closed the TSAP and gone away */
	if (self->close_pend) {
		/* In this case, the higher layer is probably gone. Don't
		 * bother it and clean up the remains - Jean II */
		if (skb)
			dev_kfree_skb(skb);
		irttp_close_tsap(self);
		return;
	}

	/* If we are here, we assume that is the higher layer is still
	 * waiting for the disconnect notification and able to process it,
	 * even if he tried to disconnect. Otherwise, it would have already
	 * attempted to close the tsap and self->close_pend would be TRUE.
	 * Jean II */

	/* No need to notify the client if has already tried to disconnect */
	if(self->notify.disconnect_indication)
		self->notify.disconnect_indication(self->notify.instance, self,
						   reason, skb);
	else
		if (skb)
			dev_kfree_skb(skb);
}

/*
 * Function irttp_do_data_indication (self, skb)
 *
 *    Try to deliver reassembled skb to layer above, and requeue it if that
 *    for some reason should fail. We mark rx sdu as busy to apply back
 *    pressure is necessary.
 */
static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb)
{
	int err;

	/* Check if client has already closed the TSAP and gone away */
	if (self->close_pend) {
		dev_kfree_skb(skb);
		return;
	}

	err = self->notify.data_indication(self->notify.instance, self, skb);

	/* Usually the layer above will notify that it's input queue is
	 * starting to get filled by using the flow request, but this may
	 * be difficult, so it can instead just refuse to eat it and just
	 * give an error back
	 */
	if (err) {
		IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__);

		/* Make sure we take a break */
		self->rx_sdu_busy = TRUE;

		/* Need to push the header in again */
		skb_push(skb, TTP_HEADER);
		skb->data[0] = 0x00; /* Make sure MORE bit is cleared */

		/* Put skb back on queue */
		skb_queue_head(&self->rx_queue, skb);
	}
}

/*
 * Function irttp_run_rx_queue (self)
 *
 *     Check if we have any frames to be transmitted, or if we have any
 *     available credit to give away.
 */
static void irttp_run_rx_queue(struct tsap_cb *self)
{
	struct sk_buff *skb;
	int more = 0;

	IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__,
		   self->send_credit, self->avail_credit, self->remote_credit);

	/* Get exclusive access to the rx queue, otherwise don't touch it */
	if (irda_lock(&self->rx_queue_lock) == FALSE)
		return;

	/*
	 *  Reassemble all frames in receive queue and deliver them
	 */
	while (!self->rx_sdu_busy && (skb = skb_dequeue(&self->rx_queue))) {
		/* This bit will tell us if it's the last fragment or not */
		more = skb->data[0] & 0x80;

		/* Remove TTP header */
		skb_pull(skb, TTP_HEADER);

		/* Add the length of the remaining data */
		self->rx_sdu_size += skb->len;

		/*
		 * If SAR is disabled, or user has requested no reassembly
		 * of received fragments then we just deliver them
		 * immediately. This can be requested by clients that
		 * implements byte streams without any message boundaries
		 */
		if (self->rx_max_sdu_size == TTP_SAR_DISABLE) {
			irttp_do_data_indication(self, skb);
			self->rx_sdu_size = 0;

			continue;
		}

		/* Check if this is a fragment, and not the last fragment */
		if (more) {
			/*
			 *  Queue the fragment if we still are within the
			 *  limits of the maximum size of the rx_sdu
			 */
			if (self->rx_sdu_size <= self->rx_max_sdu_size) {
				IRDA_DEBUG(4, "%s(), queueing frag\n",
					   __func__);
				skb_queue_tail(&self->rx_fragments, skb);
			} else {
				/* Free the part of the SDU that is too big */
				dev_kfree_skb(skb);
			}
			continue;
		}
		/*
		 *  This is the last fragment, so time to reassemble!
		 */
		if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
		    (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) {
			/*
			 * A little optimizing. Only queue the fragment if
			 * there are other fragments. Since if this is the
			 * last and only fragment, there is no need to
			 * reassemble :-)
			 */
			if (!skb_queue_empty(&self->rx_fragments)) {
				skb_queue_tail(&self->rx_fragments,
					       skb);

				skb = irttp_reassemble_skb(self);
			}

			/* Now we can deliver the reassembled skb */
			irttp_do_data_indication(self, skb);
		} else {
			IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__);

			/* Free the part of the SDU that is too big */
			dev_kfree_skb(skb);

			/* Deliver only the valid but truncated part of SDU */
			skb = irttp_reassemble_skb(self);

			irttp_do_data_indication(self, skb);
		}

		self->rx_sdu_size = 0;
	}

	/*
	 * It's not trivial to keep track of how many credits are available
	 * by incrementing at each packet, because delivery may fail
	 * (irttp_do_data_indication() may requeue the frame) and because
	 * we need to take care of fragmentation.
	 * We want the other side to send up to initial_credit packets.
	 * We have some frames in our queues, and we have already allowed it
	 * to send remote_credit.
	 * No need to spinlock, write is atomic and self correcting...
	 * Jean II
	 */
	self->avail_credit = (self->initial_credit -
			      (self->remote_credit +
			       skb_queue_len(&self->rx_queue) +
			       skb_queue_len(&self->rx_fragments)));

	/* Do we have too much credits to send to peer ? */
	if ((self->remote_credit <= TTP_RX_MIN_CREDIT) &&
	    (self->avail_credit > 0)) {
		/* Send explicit credit frame */
		irttp_give_credit(self);
		/* Note : do *NOT* check if tx_queue is non-empty, that
		 * will produce deadlocks. I repeat : send a credit frame
		 * even if we have something to send in our Tx queue.
		 * If we have credits, it means that our Tx queue is blocked.
		 *
		 * Let's suppose the peer can't keep up with our Tx. He will
		 * flow control us by not sending us any credits, and we
		 * will stop Tx and start accumulating credits here.
		 * Up to the point where the peer will stop its Tx queue,
		 * for lack of credits.
		 * Let's assume the peer application is single threaded.
		 * It will block on Tx and never consume any Rx buffer.
		 * Deadlock. Guaranteed. - Jean II */
	}

	/* Reset lock */
	self->rx_queue_lock = 0;
}

#ifdef CONFIG_PROC_FS
/* /proc/net iterator state: running index of the tsap being printed */
struct irttp_iter_state {
	int id;
};

static void *irttp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;
	struct tsap_cb *self;

	/* Protect our access to the tsap list */
	spin_lock_irq(&irttp->tsaps->hb_spinlock);
	iter->id = 0;

	for (self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
	     self != NULL;
	     self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps)) {
		if (iter->id == *pos)
			break;
		++iter->id;
	}

	return self;
}

static void *irttp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irttp_iter_state *iter = seq->private;

	++*pos;
	++iter->id;
	return (void *) hashbin_get_next(irttp->tsaps);
}

/* Lock taken in irttp_seq_start() is released here */
static void irttp_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irttp->tsaps->hb_spinlock);
}

static int irttp_seq_show(struct seq_file *seq, void *v)
{
	const struct irttp_iter_state *iter = seq->private;
	const struct tsap_cb *self = v;

	seq_printf(seq, "TSAP %d, ", iter->id);
	seq_printf(seq,
"stsap_sel: %02x, ", self->stsap_sel); seq_printf(seq, "dtsap_sel: %02x\n", self->dtsap_sel); seq_printf(seq, " connected: %s, ", self->connected? "TRUE":"FALSE"); seq_printf(seq, "avail credit: %d, ", self->avail_credit); seq_printf(seq, "remote credit: %d, ", self->remote_credit); seq_printf(seq, "send credit: %d\n", self->send_credit); seq_printf(seq, " tx packets: %lu, ", self->stats.tx_packets); seq_printf(seq, "rx packets: %lu, ", self->stats.rx_packets); seq_printf(seq, "tx_queue len: %u ", skb_queue_len(&self->tx_queue)); seq_printf(seq, "rx_queue len: %u\n", skb_queue_len(&self->rx_queue)); seq_printf(seq, " tx_sdu_busy: %s, ", self->tx_sdu_busy? "TRUE":"FALSE"); seq_printf(seq, "rx_sdu_busy: %s\n", self->rx_sdu_busy? "TRUE":"FALSE"); seq_printf(seq, " max_seg_size: %u, ", self->max_seg_size); seq_printf(seq, "tx_max_sdu_size: %u, ", self->tx_max_sdu_size); seq_printf(seq, "rx_max_sdu_size: %u\n", self->rx_max_sdu_size); seq_printf(seq, " Used by (%s)\n\n", self->notify.name); return 0; } static const struct seq_operations irttp_seq_ops = { .start = irttp_seq_start, .next = irttp_seq_next, .stop = irttp_seq_stop, .show = irttp_seq_show, }; static int irttp_seq_open(struct inode *inode, struct file *file) { return seq_open_private(file, &irttp_seq_ops, sizeof(struct irttp_iter_state)); } const struct file_operations irttp_seq_fops = { .owner = THIS_MODULE, .open = irttp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif /* PROC_FS */
gpl-2.0
michael1900/falcon_stock
drivers/media/video/tm6000/tm6000-alsa.c
5035
12374
/* * * Support for audio capture for tm5600/6000/6010 * (c) 2007-2008 Mauro Carvalho Chehab <mchehab@redhat.com> * * Based on cx88-alsa.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/control.h> #include <sound/initval.h> #include "tm6000.h" #include "tm6000-regs.h" #undef dprintk #define dprintk(level, fmt, arg...) do { \ if (debug >= level) \ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg); \ } while (0) /**************************************************************************** Module global static vars ****************************************************************************/ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1}; module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable tm6000x soundcard. 
default enabled."); module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s)."); /**************************************************************************** Module macros ****************************************************************************/ MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Trident,tm5600}," "{{Trident,tm6000}," "{{Trident,tm6010}"); static unsigned int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); /**************************************************************************** Module specific funtions ****************************************************************************/ /* * BOARD Specific: Sets audio DMA */ static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip) { struct tm6000_core *core = chip->core; dprintk(1, "Starting audio DMA\n"); /* Enables audio */ tm6000_set_reg_mask(core, TM6010_REQ07_RCC_ACTIVE_IF, 0x40, 0x40); tm6000_set_audio_bitrate(core, 48000); return 0; } /* * BOARD Specific: Resets audio DMA */ static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip) { struct tm6000_core *core = chip->core; dprintk(1, "Stopping audio DMA\n"); /* Disables audio */ tm6000_set_reg_mask(core, TM6010_REQ07_RCC_ACTIVE_IF, 0x00, 0x40); return 0; } static void dsp_buffer_free(struct snd_pcm_substream *substream) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); dprintk(2, "Freeing buffer\n"); vfree(substream->runtime->dma_area); substream->runtime->dma_area = NULL; substream->runtime->dma_bytes = 0; } static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); dprintk(2, "Allocating buffer\n"); if (substream->runtime->dma_area) { if (substream->runtime->dma_bytes > size) return 0; 
dsp_buffer_free(substream); } substream->runtime->dma_area = vmalloc(size); if (!substream->runtime->dma_area) return -ENOMEM; substream->runtime->dma_bytes = size; return 0; } /**************************************************************************** ALSA PCM Interface ****************************************************************************/ /* * Digital hardware definition */ #define DEFAULT_FIFO_SIZE 4096 static struct snd_pcm_hardware snd_tm6000_digital_hw = { .info = SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_KNOT, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .period_bytes_min = 64, .period_bytes_max = 12544, .periods_min = 2, .periods_max = 98, .buffer_bytes_max = 62720 * 8, }; /* * audio pcm capture open callback */ static int snd_tm6000_pcm_open(struct snd_pcm_substream *substream) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) goto _error; chip->substream = substream; runtime->hw = snd_tm6000_digital_hw; snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); return 0; _error: dprintk(1, "Error opening PCM!\n"); return err; } /* * audio close callback */ static int snd_tm6000_close(struct snd_pcm_substream *substream) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); struct tm6000_core *core = chip->core; if (atomic_read(&core->stream_started) > 0) { atomic_set(&core->stream_started, 0); schedule_work(&core->wq_trigger); } return 0; } static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size) { struct snd_tm6000_card *chip = core->adev; struct snd_pcm_substream *substream = chip->substream; struct snd_pcm_runtime *runtime; int 
period_elapsed = 0; unsigned int stride, buf_pos; int length; if (atomic_read(&core->stream_started) == 0) return 0; if (!size || !substream) { dprintk(1, "substream was NULL\n"); return -EINVAL; } runtime = substream->runtime; if (!runtime || !runtime->dma_area) { dprintk(1, "runtime was NULL\n"); return -EINVAL; } buf_pos = chip->buf_pos; stride = runtime->frame_bits >> 3; if (stride == 0) { dprintk(1, "stride is zero\n"); return -EINVAL; } length = size / stride; if (length == 0) { dprintk(1, "%s: length was zero\n", __func__); return -EINVAL; } dprintk(1, "Copying %d bytes at %p[%d] - buf size=%d x %d\n", size, runtime->dma_area, buf_pos, (unsigned int)runtime->buffer_size, stride); if (buf_pos + length >= runtime->buffer_size) { unsigned int cnt = runtime->buffer_size - buf_pos; memcpy(runtime->dma_area + buf_pos * stride, buf, cnt * stride); memcpy(runtime->dma_area, buf + cnt * stride, length * stride - cnt * stride); } else memcpy(runtime->dma_area + buf_pos * stride, buf, length * stride); snd_pcm_stream_lock(substream); chip->buf_pos += length; if (chip->buf_pos >= runtime->buffer_size) chip->buf_pos -= runtime->buffer_size; chip->period_pos += length; if (chip->period_pos >= runtime->period_size) { chip->period_pos -= runtime->period_size; period_elapsed = 1; } snd_pcm_stream_unlock(substream); if (period_elapsed) snd_pcm_period_elapsed(substream); return 0; } /* * hw_params callback */ static int snd_tm6000_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int size, rc; size = params_period_bytes(hw_params) * params_periods(hw_params); rc = dsp_buffer_alloc(substream, size); if (rc < 0) return rc; return 0; } /* * hw free callback */ static int snd_tm6000_hw_free(struct snd_pcm_substream *substream) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); struct tm6000_core *core = chip->core; if (atomic_read(&core->stream_started) > 0) { atomic_set(&core->stream_started, 0); 
schedule_work(&core->wq_trigger); } dsp_buffer_free(substream); return 0; } /* * prepare callback */ static int snd_tm6000_prepare(struct snd_pcm_substream *substream) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); chip->buf_pos = 0; chip->period_pos = 0; return 0; } /* * trigger callback */ static void audio_trigger(struct work_struct *work) { struct tm6000_core *core = container_of(work, struct tm6000_core, wq_trigger); struct snd_tm6000_card *chip = core->adev; if (atomic_read(&core->stream_started)) { dprintk(1, "starting capture"); _tm6000_start_audio_dma(chip); } else { dprintk(1, "stopping capture"); _tm6000_stop_audio_dma(chip); } } static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); struct tm6000_core *core = chip->core; int err = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */ case SNDRV_PCM_TRIGGER_START: atomic_set(&core->stream_started, 1); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */ case SNDRV_PCM_TRIGGER_STOP: atomic_set(&core->stream_started, 0); break; default: err = -EINVAL; break; } schedule_work(&core->wq_trigger); return err; } /* * pointer callback */ static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream) { struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream); return chip->buf_pos; } static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs, unsigned long offset) { void *pageptr = subs->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * operators */ static struct snd_pcm_ops snd_tm6000_pcm_ops = { .open = snd_tm6000_pcm_open, .close = snd_tm6000_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_tm6000_hw_params, .hw_free = snd_tm6000_hw_free, .prepare = snd_tm6000_prepare, .trigger = snd_tm6000_card_trigger, 
.pointer = snd_tm6000_pointer, .page = snd_pcm_get_vmalloc_page, }; /* * create a PCM device */ /* FIXME: Control interface - How to control volume/mute? */ /**************************************************************************** Basic Flow for Sound Devices ****************************************************************************/ /* * Alsa Constructor - Component probe */ static int tm6000_audio_init(struct tm6000_core *dev) { struct snd_card *card; struct snd_tm6000_card *chip; int rc; static int devnr; char component[14]; struct snd_pcm *pcm; if (!dev) return 0; if (devnr >= SNDRV_CARDS) return -ENODEV; if (!enable[devnr]) return -ENOENT; rc = snd_card_create(index[devnr], "tm6000", THIS_MODULE, 0, &card); if (rc < 0) { snd_printk(KERN_ERR "cannot create card instance %d\n", devnr); return rc; } strcpy(card->driver, "tm6000-alsa"); strcpy(card->shortname, "TM5600/60x0"); sprintf(card->longname, "TM5600/60x0 Audio at bus %d device %d", dev->udev->bus->busnum, dev->udev->devnum); sprintf(component, "USB%04x:%04x", le16_to_cpu(dev->udev->descriptor.idVendor), le16_to_cpu(dev->udev->descriptor.idProduct)); snd_component_add(card, component); snd_card_set_dev(card, &dev->udev->dev); chip = kzalloc(sizeof(struct snd_tm6000_card), GFP_KERNEL); if (!chip) { rc = -ENOMEM; goto error; } chip->core = dev; chip->card = card; dev->adev = chip; spin_lock_init(&chip->reg_lock); rc = snd_pcm_new(card, "TM6000 Audio", 0, 0, 1, &pcm); if (rc < 0) goto error_chip; pcm->info_flags = 0; pcm->private_data = chip; strcpy(pcm->name, "Trident TM5600/60x0"); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops); INIT_WORK(&dev->wq_trigger, audio_trigger); rc = snd_card_register(card); if (rc < 0) goto error_chip; dprintk(1, "Registered audio driver for %s\n", card->longname); return 0; error_chip: kfree(chip); dev->adev = NULL; error: snd_card_free(card); return rc; } static int tm6000_audio_fini(struct tm6000_core *dev) { struct snd_tm6000_card *chip = dev->adev; 
if (!dev) return 0; if (!chip) return 0; if (!chip->card) return 0; snd_card_free(chip->card); chip->card = NULL; kfree(chip); dev->adev = NULL; return 0; } static struct tm6000_ops audio_ops = { .type = TM6000_AUDIO, .name = "TM6000 Audio Extension", .init = tm6000_audio_init, .fini = tm6000_audio_fini, .fillbuf = tm6000_fillbuf, }; static int __init tm6000_alsa_register(void) { return tm6000_register_extension(&audio_ops); } static void __exit tm6000_alsa_unregister(void) { tm6000_unregister_extension(&audio_ops); } module_init(tm6000_alsa_register); module_exit(tm6000_alsa_unregister);
gpl-2.0
engstk/hammerhead
drivers/staging/media/lirc/lirc_igorplugusb.c
5035
13574
/* * lirc_igorplugusb - USB remote support for LIRC * * Supports the standard homebrew IgorPlugUSB receiver with Igor's firmware. * See http://www.cesko.host.sk/IgorPlugUSB/IgorPlug-USB%20(AVR)_eng.htm * * The device can only record bursts of up to 36 pulses/spaces. * Works fine with RC5. Longer commands lead to device buffer overrun. * (Maybe a better firmware or a microcontroller with more ram can help?) * * Version 0.1 [beta status] * * Copyright (C) 2004 Jan M. Hochstein * <hochstein@algo.informatik.tu-darmstadt.de> * * This driver was derived from: * Paul Miller <pmiller9@users.sourceforge.net> * "lirc_atiusb" module * Vladimir Dergachev <volodya@minspring.com>'s 2002 * "USB ATI Remote support" (input device) * Adrian Dewhurst <sailor-lk@sailorfrag.net>'s 2002 * "USB StreamZap remote driver" (LIRC) * Artur Lipowski <alipowski@kki.net.pl>'s 2002 * "lirc_dev" and "lirc_gpio" LIRC modules */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/usb.h> #include <linux/time.h> #include <media/lirc.h> #include <media/lirc_dev.h> /* module identification */ #define DRIVER_VERSION "0.2" #define DRIVER_AUTHOR \ "Jan M. 
Hochstein <hochstein@algo.informatik.tu-darmstadt.de>" #define DRIVER_DESC "Igorplug USB remote driver for LIRC" #define DRIVER_NAME "lirc_igorplugusb" /* debugging support */ #ifdef CONFIG_USB_DEBUG static bool debug = 1; #else static bool debug; #endif #define dprintk(fmt, args...) \ do { \ if (debug) \ printk(KERN_DEBUG fmt, ## args); \ } while (0) /* One mode2 pulse/space has 4 bytes. */ #define CODE_LENGTH sizeof(int) /* Igor's firmware cannot record bursts longer than 36. */ #define DEVICE_BUFLEN 36 /* * Header at the beginning of the device's buffer: * unsigned char data_length * unsigned char data_start (!=0 means ring-buffer overrun) * unsigned char counter (incremented by each burst) */ #define DEVICE_HEADERLEN 3 /* This is for the gap */ #define ADDITIONAL_LIRC_BYTES 2 /* times to poll per second */ #define SAMPLE_RATE 100 static int sample_rate = SAMPLE_RATE; /**** Igor's USB Request Codes */ #define SET_INFRABUFFER_EMPTY 1 /** * Params: none * Answer: empty */ #define GET_INFRACODE 2 /** * Params: * wValue: offset to begin reading infra buffer * * Answer: infra data */ #define SET_DATAPORT_DIRECTION 3 /** * Params: * wValue: (byte) 1 bit for each data port pin (0=in, 1=out) * * Answer: empty */ #define GET_DATAPORT_DIRECTION 4 /** * Params: none * * Answer: (byte) 1 bit for each data port pin (0=in, 1=out) */ #define SET_OUT_DATAPORT 5 /** * Params: * wValue: byte to write to output data port * * Answer: empty */ #define GET_OUT_DATAPORT 6 /** * Params: none * * Answer: least significant 3 bits read from output data port */ #define GET_IN_DATAPORT 7 /** * Params: none * * Answer: least significant 3 bits read from input data port */ #define READ_EEPROM 8 /** * Params: * wValue: offset to begin reading EEPROM * * Answer: EEPROM bytes */ #define WRITE_EEPROM 9 /** * Params: * wValue: offset to EEPROM byte * wIndex: byte to write * * Answer: empty */ #define SEND_RS232 10 /** * Params: * wValue: byte to send * * Answer: empty */ #define RECV_RS232 11 /** 
* Params: none * * Answer: byte received */ #define SET_RS232_BAUD 12 /** * Params: * wValue: byte to write to UART bit rate register (UBRR) * * Answer: empty */ #define GET_RS232_BAUD 13 /** * Params: none * * Answer: byte read from UART bit rate register (UBRR) */ /* data structure for each usb remote */ struct igorplug { /* usb */ struct usb_device *usbdev; int devnum; unsigned char *buf_in; unsigned int len_in; int in_space; struct timeval last_time; dma_addr_t dma_in; /* lirc */ struct lirc_driver *d; /* handle sending (init strings) */ int send_flags; }; static int unregister_from_lirc(struct igorplug *ir) { struct lirc_driver *d; int devnum; if (!ir) { printk(KERN_ERR "%s: called with NULL device struct!\n", __func__); return -EINVAL; } devnum = ir->devnum; d = ir->d; if (!d) { printk(KERN_ERR "%s: called with NULL lirc driver struct!\n", __func__); return -EINVAL; } dprintk(DRIVER_NAME "[%d]: calling lirc_unregister_driver\n", devnum); lirc_unregister_driver(d->minor); kfree(d); ir->d = NULL; kfree(ir); return devnum; } static int set_use_inc(void *data) { struct igorplug *ir = data; if (!ir) { printk(DRIVER_NAME "[?]: set_use_inc called with no context\n"); return -EIO; } dprintk(DRIVER_NAME "[%d]: set use inc\n", ir->devnum); if (!ir->usbdev) return -ENODEV; return 0; } static void set_use_dec(void *data) { struct igorplug *ir = data; if (!ir) { printk(DRIVER_NAME "[?]: set_use_dec called with no context\n"); return; } dprintk(DRIVER_NAME "[%d]: set use dec\n", ir->devnum); } static void send_fragment(struct igorplug *ir, struct lirc_buffer *buf, int i, int max) { int code; /* MODE2: pulse/space (PULSE_BIT) in 1us units */ while (i < max) { /* 1 Igor-tick = 85.333333 us */ code = (unsigned int)ir->buf_in[i] * 85 + (unsigned int)ir->buf_in[i] / 3; ir->last_time.tv_usec += code; if (ir->in_space) code |= PULSE_BIT; lirc_buffer_write(buf, (unsigned char *)&code); /* 1 chunk = CODE_LENGTH bytes */ ir->in_space ^= 1; ++i; } } /** * Called in user context. 
* return 0 if data was added to the buffer and * -ENODATA if none was available. This should add some number of bits * evenly divisible by code_length to the buffer */ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf) { int ret; struct igorplug *ir = (struct igorplug *)data; if (!ir || !ir->usbdev) /* Has the device been removed? */ return -ENODEV; memset(ir->buf_in, 0, ir->len_in); ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), GET_INFRACODE, USB_TYPE_VENDOR | USB_DIR_IN, 0/* offset */, /*unused*/0, ir->buf_in, ir->len_in, /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); if (ret > 0) { int code, timediff; struct timeval now; /* ACK packet has 1 byte --> ignore */ if (ret < DEVICE_HEADERLEN) return -ENODATA; dprintk(DRIVER_NAME ": Got %d bytes. Header: %02x %02x %02x\n", ret, ir->buf_in[0], ir->buf_in[1], ir->buf_in[2]); do_gettimeofday(&now); timediff = now.tv_sec - ir->last_time.tv_sec; if (timediff + 1 > PULSE_MASK / 1000000) timediff = PULSE_MASK; else { timediff *= 1000000; timediff += now.tv_usec - ir->last_time.tv_usec; } ir->last_time.tv_sec = now.tv_sec; ir->last_time.tv_usec = now.tv_usec; /* create leading gap */ code = timediff; lirc_buffer_write(buf, (unsigned char *)&code); ir->in_space = 1; /* next comes a pulse */ if (ir->buf_in[2] == 0) send_fragment(ir, buf, DEVICE_HEADERLEN, ret); else { printk(KERN_WARNING DRIVER_NAME "[%d]: Device buffer overrun.\n", ir->devnum); /* HHHNNNNNNNNNNNOOOOOOOO H = header <---[2]---> N = newer <---------ret--------> O = older */ ir->buf_in[2] %= ret - DEVICE_HEADERLEN; /* sanitize */ /* keep even-ness to not desync pulse/pause */ send_fragment(ir, buf, DEVICE_HEADERLEN + ir->buf_in[2] - (ir->buf_in[2] & 1), ret); send_fragment(ir, buf, DEVICE_HEADERLEN, DEVICE_HEADERLEN + ir->buf_in[2]); } ret = usb_control_msg( ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN, /*unused*/0, /*unused*/0, /*dummy*/ir->buf_in, /*dummy*/ir->len_in, /*timeout*/HZ 
* USB_CTRL_GET_TIMEOUT); if (ret < 0) printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: " "error %d\n", ir->devnum, ret); return 0; } else if (ret < 0) printk(DRIVER_NAME "[%d]: GET_INFRACODE: error %d\n", ir->devnum, ret); return -ENODATA; } static int igorplugusb_remote_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = NULL; struct usb_host_interface *idesc = NULL; struct usb_endpoint_descriptor *ep; struct igorplug *ir = NULL; struct lirc_driver *driver = NULL; int devnum, pipe, maxp; int minor = 0; char buf[63], name[128] = ""; int mem_failure = 0; int ret; dprintk(DRIVER_NAME ": usb probe called.\n"); dev = interface_to_usbdev(intf); idesc = intf->cur_altsetting; if (idesc->desc.bNumEndpoints != 1) return -ENODEV; ep = &idesc->endpoint->desc; if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != USB_DIR_IN) || (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_CONTROL) return -ENODEV; pipe = usb_rcvctrlpipe(dev, ep->bEndpointAddress); devnum = dev->devnum; maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); dprintk(DRIVER_NAME "[%d]: bytes_in_key=%zu maxp=%d\n", devnum, CODE_LENGTH, maxp); mem_failure = 0; ir = kzalloc(sizeof(struct igorplug), GFP_KERNEL); if (!ir) { mem_failure = 1; goto mem_failure_switch; } driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL); if (!driver) { mem_failure = 2; goto mem_failure_switch; } ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN, GFP_ATOMIC, &ir->dma_in); if (!ir->buf_in) { mem_failure = 3; goto mem_failure_switch; } strcpy(driver->name, DRIVER_NAME " "); driver->minor = -1; driver->code_length = CODE_LENGTH * 8; /* in bits */ driver->features = LIRC_CAN_REC_MODE2; driver->data = ir; driver->chunk_size = CODE_LENGTH; driver->buffer_size = DEVICE_BUFLEN + ADDITIONAL_LIRC_BYTES; driver->set_use_inc = &set_use_inc; driver->set_use_dec = &set_use_dec; driver->sample_rate = sample_rate; /* per second */ driver->add_to_buf = 
&igorplugusb_remote_poll; driver->dev = &intf->dev; driver->owner = THIS_MODULE; minor = lirc_register_driver(driver); if (minor < 0) mem_failure = 9; mem_failure_switch: switch (mem_failure) { case 9: usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN, ir->buf_in, ir->dma_in); case 3: kfree(driver); case 2: kfree(ir); case 1: printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n", devnum, mem_failure); return -ENOMEM; } driver->minor = minor; ir->d = driver; ir->devnum = devnum; ir->usbdev = dev; ir->len_in = DEVICE_BUFLEN + DEVICE_HEADERLEN; ir->in_space = 1; /* First mode2 event is a space. */ do_gettimeofday(&ir->last_time); if (dev->descriptor.iManufacturer && usb_string(dev, dev->descriptor.iManufacturer, buf, sizeof(buf)) > 0) strlcpy(name, buf, sizeof(name)); if (dev->descriptor.iProduct && usb_string(dev, dev->descriptor.iProduct, buf, sizeof(buf)) > 0) snprintf(name + strlen(name), sizeof(name) - strlen(name), " %s", buf); printk(DRIVER_NAME "[%d]: %s on usb%d:%d\n", devnum, name, dev->bus->busnum, devnum); /* clear device buffer */ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), SET_INFRABUFFER_EMPTY, USB_TYPE_VENDOR|USB_DIR_IN, /*unused*/0, /*unused*/0, /*dummy*/ir->buf_in, /*dummy*/ir->len_in, /*timeout*/HZ * USB_CTRL_GET_TIMEOUT); if (ret < 0) printk(DRIVER_NAME "[%d]: SET_INFRABUFFER_EMPTY: error %d\n", devnum, ret); usb_set_intfdata(intf, ir); return 0; } static void igorplugusb_remote_disconnect(struct usb_interface *intf) { struct usb_device *usbdev = interface_to_usbdev(intf); struct igorplug *ir = usb_get_intfdata(intf); struct device *dev = &intf->dev; int devnum; usb_set_intfdata(intf, NULL); if (!ir || !ir->d) return; ir->usbdev = NULL; usb_free_coherent(usbdev, ir->len_in, ir->buf_in, ir->dma_in); devnum = unregister_from_lirc(ir); dev_info(dev, DRIVER_NAME "[%d]: %s done\n", devnum, __func__); } static struct usb_device_id igorplugusb_remote_id_table[] = { /* Igor Plug USB (Atmel's Manufact. 
ID) */ { USB_DEVICE(0x03eb, 0x0002) }, /* Fit PC2 Infrared Adapter */ { USB_DEVICE(0x03eb, 0x21fe) }, /* Terminating entry */ { } }; static struct usb_driver igorplugusb_remote_driver = { .name = DRIVER_NAME, .probe = igorplugusb_remote_probe, .disconnect = igorplugusb_remote_disconnect, .id_table = igorplugusb_remote_id_table }; module_usb_driver(igorplugusb_remote_driver); #include <linux/vermagic.h> MODULE_INFO(vermagic, VERMAGIC_STRING); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(usb, igorplugusb_remote_id_table); module_param(sample_rate, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(sample_rate, "Sampling rate in Hz (default: 100)"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug enabled or not");
gpl-2.0
Phoenix-CJ23/stockkernel
Documentation/ia64/aliasing-test.c
8107
6132
/* * Exercise /dev/mem mmap cases that have been troublesome in the past * * (c) Copyright 2007 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <stdlib.h> #include <stdio.h> #include <sys/types.h> #include <dirent.h> #include <fcntl.h> #include <fnmatch.h> #include <string.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/stat.h> #include <unistd.h> #include <linux/pci.h> int sum; static int map_mem(char *path, off_t offset, size_t length, int touch) { int fd, rc; void *addr; int *c; fd = open(path, O_RDWR); if (fd == -1) { perror(path); return -1; } if (fnmatch("/proc/bus/pci/*", path, 0) == 0) { rc = ioctl(fd, PCIIOC_MMAP_IS_MEM); if (rc == -1) perror("PCIIOC_MMAP_IS_MEM ioctl"); } addr = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset); if (addr == MAP_FAILED) return 1; if (touch) { c = (int *) addr; while (c < (int *) (addr + length)) sum += *c++; } rc = munmap(addr, length); if (rc == -1) { perror("munmap"); return -1; } close(fd); return 0; } static int scan_tree(char *path, char *file, off_t offset, size_t length, int touch) { struct dirent **namelist; char *name, *path2; int i, n, r, rc = 0, result = 0; struct stat buf; n = scandir(path, &namelist, 0, alphasort); if (n < 0) { perror("scandir"); return -1; } for (i = 0; i < n; i++) { name = namelist[i]->d_name; if (fnmatch(".", name, 0) == 0) goto skip; if (fnmatch("..", name, 0) == 0) goto skip; path2 = malloc(strlen(path) + strlen(name) + 3); strcpy(path2, path); strcat(path2, "/"); strcat(path2, name); if (fnmatch(file, name, 0) == 0) { rc = map_mem(path2, offset, length, touch); if (rc == 0) fprintf(stderr, "PASS: %s 0x%lx-0x%lx is %s\n", path2, offset, offset + length, touch ? 
"readable" : "mappable"); else if (rc > 0) fprintf(stderr, "PASS: %s 0x%lx-0x%lx not mappable\n", path2, offset, offset + length); else { fprintf(stderr, "FAIL: %s 0x%lx-0x%lx not accessible\n", path2, offset, offset + length); return rc; } } else { r = lstat(path2, &buf); if (r == 0 && S_ISDIR(buf.st_mode)) { rc = scan_tree(path2, file, offset, length, touch); if (rc < 0) return rc; } } result |= rc; free(path2); skip: free(namelist[i]); } free(namelist); return result; } char buf[1024]; static int read_rom(char *path) { int fd, rc; size_t size = 0; fd = open(path, O_RDWR); if (fd == -1) { perror(path); return -1; } rc = write(fd, "1", 2); if (rc <= 0) { perror("write"); return -1; } do { rc = read(fd, buf, sizeof(buf)); if (rc > 0) size += rc; } while (rc > 0); close(fd); return size; } static int scan_rom(char *path, char *file) { struct dirent **namelist; char *name, *path2; int i, n, r, rc = 0, result = 0; struct stat buf; n = scandir(path, &namelist, 0, alphasort); if (n < 0) { perror("scandir"); return -1; } for (i = 0; i < n; i++) { name = namelist[i]->d_name; if (fnmatch(".", name, 0) == 0) goto skip; if (fnmatch("..", name, 0) == 0) goto skip; path2 = malloc(strlen(path) + strlen(name) + 3); strcpy(path2, path); strcat(path2, "/"); strcat(path2, name); if (fnmatch(file, name, 0) == 0) { rc = read_rom(path2); /* * It's OK if the ROM is unreadable. Maybe there * is no ROM, or some other error occurred. The * important thing is that no MCA happened. 
*/ if (rc > 0) fprintf(stderr, "PASS: %s read %d bytes\n", path2, rc); else { fprintf(stderr, "PASS: %s not readable\n", path2); return rc; } } else { r = lstat(path2, &buf); if (r == 0 && S_ISDIR(buf.st_mode)) { rc = scan_rom(path2, file); if (rc < 0) return rc; } } result |= rc; free(path2); skip: free(namelist[i]); } free(namelist); return result; } int main(void) { int rc; if (map_mem("/dev/mem", 0, 0xA0000, 1) == 0) fprintf(stderr, "PASS: /dev/mem 0x0-0xa0000 is readable\n"); else fprintf(stderr, "FAIL: /dev/mem 0x0-0xa0000 not accessible\n"); /* * It's not safe to blindly read the VGA frame buffer. If you know * how to poke the card the right way, it should respond, but it's * not safe in general. Many machines, e.g., Intel chipsets, cover * up a non-responding card by just returning -1, but others will * report the failure as a machine check. */ if (map_mem("/dev/mem", 0xA0000, 0x20000, 0) == 0) fprintf(stderr, "PASS: /dev/mem 0xa0000-0xc0000 is mappable\n"); else fprintf(stderr, "FAIL: /dev/mem 0xa0000-0xc0000 not accessible\n"); if (map_mem("/dev/mem", 0xC0000, 0x40000, 1) == 0) fprintf(stderr, "PASS: /dev/mem 0xc0000-0x100000 is readable\n"); else fprintf(stderr, "FAIL: /dev/mem 0xc0000-0x100000 not accessible\n"); /* * Often you can map all the individual pieces above (0-0xA0000, * 0xA0000-0xC0000, and 0xC0000-0x100000), but can't map the whole * thing at once. This is because the individual pieces use different * attributes, and there's no single attribute supported over the * whole region. 
*/ rc = map_mem("/dev/mem", 0, 1024*1024, 0); if (rc == 0) fprintf(stderr, "PASS: /dev/mem 0x0-0x100000 is mappable\n"); else if (rc > 0) fprintf(stderr, "PASS: /dev/mem 0x0-0x100000 not mappable\n"); else fprintf(stderr, "FAIL: /dev/mem 0x0-0x100000 not accessible\n"); scan_tree("/sys/class/pci_bus", "legacy_mem", 0, 0xA0000, 1); scan_tree("/sys/class/pci_bus", "legacy_mem", 0xA0000, 0x20000, 0); scan_tree("/sys/class/pci_bus", "legacy_mem", 0xC0000, 0x40000, 1); scan_tree("/sys/class/pci_bus", "legacy_mem", 0, 1024*1024, 0); scan_rom("/sys/devices", "rom"); scan_tree("/proc/bus/pci", "??.?", 0, 0xA0000, 1); scan_tree("/proc/bus/pci", "??.?", 0xA0000, 0x20000, 0); scan_tree("/proc/bus/pci", "??.?", 0xC0000, 0x40000, 1); scan_tree("/proc/bus/pci", "??.?", 0, 1024*1024, 0); return rc; }
gpl-2.0
zaolin/android_kernel_samsung_msm8930-common
drivers/gpu/drm/gma500/intel_i2c.c
9899
4385
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

#include "psb_drv.h"
#include "psb_intel_reg.h"

/*
 * Intel GPIO access functions
 *
 * Bit-banged I2C over the GPU's GPIO registers, exposed to the i2c layer
 * through the i2c-algo-bit get/set callbacks below.
 */

#define I2C_RISEFALL_TIME 20	/* usecs to let SCL/SDA settle after a write */

/* i2c-algo-bit getscl callback: sample the current SCL level. */
static int get_clock(void *data)
{
	struct psb_intel_i2c_chan *chan = data;
	/* NOTE(review): dev appears unused, but is presumably referenced by
	 * the REG_READ() macro — confirm against psb_drv.h before removing. */
	struct drm_device *dev = chan->drm_dev;
	u32 val;

	val = REG_READ(chan->reg);
	return (val & GPIO_CLOCK_VAL_IN) != 0;
}

/* i2c-algo-bit getsda callback: sample the current SDA level. */
static int get_data(void *data)
{
	struct psb_intel_i2c_chan *chan = data;
	struct drm_device *dev = chan->drm_dev;
	u32 val;

	val = REG_READ(chan->reg);
	return (val & GPIO_DATA_VAL_IN) != 0;
}

/*
 * i2c-algo-bit setscl callback: drive SCL high (by tristating the pin,
 * letting the pull-up raise it) or low (by driving the output to 0).
 */
static void set_clock(void *data, int state_high)
{
	struct psb_intel_i2c_chan *chan = data;
	struct drm_device *dev = chan->drm_dev;
	u32 reserved = 0, clock_bits;

	/* On most chips, these bits must be preserved in software. */
	reserved =
	    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
				   GPIO_CLOCK_PULLUP_DISABLE);

	if (state_high)
		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
	else
		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
		    GPIO_CLOCK_VAL_MASK;
	REG_WRITE(chan->reg, reserved | clock_bits);
	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
}

/*
 * i2c-algo-bit setsda callback: drive SDA high or low, mirroring
 * set_clock() but for the data line.
 */
static void set_data(void *data, int state_high)
{
	struct psb_intel_i2c_chan *chan = data;
	struct drm_device *dev = chan->drm_dev;
	u32 reserved = 0, data_bits;

	/* On most chips, these bits must be preserved in software. */
	reserved =
	    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
				   GPIO_CLOCK_PULLUP_DISABLE);

	if (state_high)
		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
	else
		data_bits =
		    GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
		    GPIO_DATA_VAL_MASK;

	REG_WRITE(chan->reg, reserved | data_bits);
	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
}

/**
 * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
 * @dev: DRM device
 * @reg: GPIO reg to use
 * @name: name for this bus
 *
 * Creates and registers a new i2c bus with the Linux i2c layer, for use
 * in output probing and control (e.g. DDC or SDVO control functions).
 *
 * Returns the new channel, or NULL on allocation/registration failure.
 * The caller owns the result and must release it with
 * psb_intel_i2c_destroy().
 *
 * Possible values for @reg include:
 *	%GPIOA
 *	%GPIOB
 *	%GPIOC
 *	%GPIOD
 *	%GPIOE
 *	%GPIOF
 *	%GPIOG
 *	%GPIOH
 * see PRM for details on how these different busses are used.
 */
struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
					const u32 reg, const char *name)
{
	struct psb_intel_i2c_chan *chan;

	chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
	if (!chan)
		goto out_free;	/* kfree(NULL) is a no-op, so this is safe */

	chan->drm_dev = dev;
	chan->reg = reg;
	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
	chan->adapter.owner = THIS_MODULE;
	chan->adapter.algo_data = &chan->algo;
	chan->adapter.dev.parent = &dev->pdev->dev;
	chan->algo.setsda = set_data;
	chan->algo.setscl = set_clock;
	chan->algo.getsda = get_data;
	chan->algo.getscl = get_clock;
	chan->algo.udelay = 20;
	chan->algo.timeout = usecs_to_jiffies(2200);
	chan->algo.data = chan;

	i2c_set_adapdata(&chan->adapter, chan);

	if (i2c_bit_add_bus(&chan->adapter))
		goto out_free;

	/* JJJ:  raise SCL and SDA? */
	set_data(chan, 1);
	set_clock(chan, 1);
	udelay(20);

	return chan;

out_free:
	kfree(chan);
	return NULL;
}

/**
 * psb_intel_i2c_destroy - unregister and free i2c bus resources
 * @chan: channel to free (NULL is allowed and ignored)
 *
 * Unregister the adapter from the i2c layer, then free the structure.
 */
void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
{
	if (!chan)
		return;
	i2c_del_adapter(&chan->adapter);
	kfree(chan);
}
gpl-2.0
12thmantec/linux-3.5
drivers/uwb/beacon.c
11691
16601
/*
 * Ultra Wide Band
 * Beacon management
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * FIXME: docs
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include "uwb-internal.h"

/* Start Beaconing command structure */
struct uwb_rc_cmd_start_beacon {
	struct uwb_rccb rccb;
	__le16 wBPSTOffset;
	u8 bChannelNumber;
} __attribute__((packed));

/*
 * Issue the START-BEACON command to the radio controller and check the
 * confirmation.  Returns 0 on success, -ENOMEM/-EIO or the uwb_rc_cmd()
 * error on failure.
 */
static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
{
	int result;
	struct uwb_rc_cmd_start_beacon *cmd;
	struct uwb_rc_evt_confirm reply;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
	cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
	cmd->bChannelNumber = channel;
	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
	reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
	result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
			    &reply.rceb, sizeof(reply));
	if (result < 0)
		goto error_cmd;
	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
		dev_err(&rc->uwb_dev.dev,
			"START-BEACON: command execution failed: %s (%d)\n",
			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
		result = -EIO;
	}
error_cmd:
	kfree(cmd);
	return result;
}

/*
 * Issue the STOP-BEACON command to the radio controller and check the
 * confirmation.  Same return convention as uwb_rc_start_beacon().
 */
static int uwb_rc_stop_beacon(struct uwb_rc *rc)
{
	int result;
	struct uwb_rccb *cmd;
	struct uwb_rc_evt_confirm reply;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
	reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
	result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
			    &reply.rceb, sizeof(reply));
	if (result < 0)
		goto error_cmd;
	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
		dev_err(&rc->uwb_dev.dev,
			"STOP-BEACON: command execution failed: %s (%d)\n",
			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
		result = -EIO;
	}
error_cmd:
	kfree(cmd);
	return result;
}

/*
 * Start/stop beacons
 *
 * @rc:          UWB Radio Controller to operate on
 * @channel:     UWB channel on which to beacon (WUSB[table
 *               5-12]). If -1, stop beaconing.
 * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
 *
 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
 * of a SET IE command after the device sent the first beacon that includes
 * the IEs specified in the SET IE command. So, after we start beaconing we
 * check if there is anything in the IE cache and call the SET IE command
 * if needed.
 *
 * On success, rc->beaconing is updated to the (possibly adjusted) channel.
 */
int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;

	if (channel < 0)
		channel = -1;
	if (channel == -1)
		result = uwb_rc_stop_beacon(rc);
	else {
		/* channel >= 0...dah */
		result = uwb_rc_start_beacon(rc, bpst_offset, channel);
		if (result < 0)
			return result;
		if (le16_to_cpu(rc->ies->wIELength) > 0) {
			result = uwb_rc_set_ie(rc, rc->ies);
			if (result < 0) {
				dev_err(dev, "Cannot set new IE on device: "
					"%d\n", result);
				/* couldn't apply the cached IEs: fall back
				 * to not beaconing at all */
				result = uwb_rc_stop_beacon(rc);
				channel = -1;
				bpst_offset = 0;
			}
		}
	}

	if (result >= 0)
		rc->beaconing = channel;
	return result;
}

/*
 * Beacon cache
 *
 * The purpose of this is to speed up the lookup of beacon information
 * when a new beacon arrives. The UWB Daemon uses it also to keep a
 * tab of which devices are in radio distance and which not. When a
 * device's beacon stays present for more than a certain amount of
 * time, it is considered a new, usable device. When a beacon ceases
 * to be received for a certain amount of time, it is considered that
 * the device is gone.
 *
 * FIXME: use an allocator for the entries
 * FIXME: use something faster for search than a list
 */

/* kref release callback: free a cache entry and its stored beacon event. */
void uwb_bce_kfree(struct kref *_bce)
{
	struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);

	kfree(bce->be);
	kfree(bce);
}

/* Find a beacon by dev addr in the cache.  Caller holds uwb_beca.mutex. */
static
struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc,
					 const struct uwb_dev_addr *dev_addr)
{
	struct uwb_beca_e *bce, *next;
	list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
		if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
			goto out;
	}
	bce = NULL;
out:
	return bce;
}

/* Find a beacon by MAC (EUI-48) addr in the cache.  Caller holds
 * uwb_beca.mutex. */
static
struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc,
					 const struct uwb_mac_addr *mac_addr)
{
	struct uwb_beca_e *bce, *next;
	list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
		if (!memcmp(bce->mac_addr, mac_addr->data,
			    sizeof(struct uwb_mac_addr)))
			goto out;
	}
	bce = NULL;
out:
	return bce;
}

/**
 * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
 * @rc:      the radio controller that saw the device
 * @devaddr: DevAddr of the UWB device to find
 *
 * There may be more than one matching device (in the case of a
 * DevAddr conflict), but only the first one is returned.
 *
 * Returns a referenced device (via uwb_dev_try_get()) or NULL.
 */
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
				       const struct uwb_dev_addr *devaddr)
{
	struct uwb_dev *found = NULL;
	struct uwb_beca_e *bce;

	mutex_lock(&rc->uwb_beca.mutex);
	bce = __uwb_beca_find_bydev(rc, devaddr);
	if (bce)
		found = uwb_dev_try_get(rc, bce->uwb_dev);
	mutex_unlock(&rc->uwb_beca.mutex);

	return found;
}

/**
 * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
 * @rc:      the radio controller that saw the device
 * @macaddr: EUI-48 of the UWB device to find
 *
 * Returns a referenced device (via uwb_dev_try_get()) or NULL.
 */
struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
				       const struct uwb_mac_addr *macaddr)
{
	struct uwb_dev *found = NULL;
	struct uwb_beca_e *bce;

	mutex_lock(&rc->uwb_beca.mutex);
	bce = __uwb_beca_find_bymac(rc, macaddr);
	if (bce)
		found = uwb_dev_try_get(rc, bce->uwb_dev);
	mutex_unlock(&rc->uwb_beca.mutex);

	return found;
}

/* Initialize a beacon cache entry */
static void uwb_beca_e_init(struct uwb_beca_e *bce)
{
	mutex_init(&bce->mutex);
	kref_init(&bce->refcnt);
	stats_init(&bce->lqe_stats);
	stats_init(&bce->rssi_stats);
}

/*
 * Add a beacon to the cache
 *
 * @be:         Beacon event information
 * @bf:         Beacon frame (part of b, really)
 * @ts_jiffies: Timestamp (in jiffies) when the beacon was received
 *
 * Caller holds uwb_beca.mutex.  Note @be/@bf are not stored here; the
 * caller fills in bce->be / address fields afterwards (see
 * uwbd_evt_handle_rc_beacon()).
 */
static
struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc,
				  struct uwb_rc_evt_beacon *be,
				  struct uwb_beacon_frame *bf,
				  unsigned long ts_jiffies)
{
	struct uwb_beca_e *bce;

	bce = kzalloc(sizeof(*bce), GFP_KERNEL);
	if (bce == NULL)
		return NULL;
	uwb_beca_e_init(bce);
	bce->ts_jiffies = ts_jiffies;
	bce->uwb_dev = NULL;
	list_add(&bce->node, &rc->uwb_beca.list);
	return bce;
}

/*
 * Wipe out beacon entries that became stale
 *
 * Remove associated devices too.
 */
void uwb_beca_purge(struct uwb_rc *rc)
{
	struct uwb_beca_e *bce, *next;
	unsigned long expires;

	mutex_lock(&rc->uwb_beca.mutex);
	list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
		expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
		if (time_after(jiffies, expires)) {
			uwbd_dev_offair(bce);
		}
	}
	mutex_unlock(&rc->uwb_beca.mutex);
}

/* Clean up the whole beacon cache. Called on shutdown */
void uwb_beca_release(struct uwb_rc *rc)
{
	struct uwb_beca_e *bce, *next;

	mutex_lock(&rc->uwb_beca.mutex);
	list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) {
		list_del(&bce->node);
		uwb_bce_put(bce);
	}
	mutex_unlock(&rc->uwb_beca.mutex);
}

/* Log a one-line summary of a received beacon frame. */
static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
			     struct uwb_beacon_frame *bf)
{
	char macbuf[UWB_ADDR_STRSIZE];
	char devbuf[UWB_ADDR_STRSIZE];
	char dstbuf[UWB_ADDR_STRSIZE];

	uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
	uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
	uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
	dev_info(&rc->uwb_dev.dev,
		 "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
		 devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
		 bf->Beacon_Slot_Number, macbuf);
}

/*
 * @bce: beacon cache entry, referenced
 *
 * Dump the IEs of the cached beacon (if any) as hex into @buf; returns
 * the number of characters written.
 */
ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
			  char *buf, size_t size)
{
	ssize_t result = 0;
	struct uwb_rc_evt_beacon *be;
	struct uwb_beacon_frame *bf;
	int ies_len;
	struct uwb_ie_hdr *ies;

	mutex_lock(&bce->mutex);

	be = bce->be;
	if (be) {
		bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
		ies_len = be->wBeaconInfoLength -
			sizeof(struct uwb_beacon_frame);
		ies = (struct uwb_ie_hdr *)bf->IEData;
		result = uwb_ie_dump_hex(ies, ies_len, buf, size);
	}

	mutex_unlock(&bce->mutex);

	return result;
}

/*
 * Verify that the beacon event, frame and IEs are ok
 */
static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
			     struct uwb_rc_evt_beacon *be)
{
	int result = -EINVAL;
	struct uwb_beacon_frame *bf;
	struct device *dev = &rc->uwb_dev.dev;

	/* Is there enough data to decode a beacon frame? */
	if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
		dev_err(dev, "BEACON event: Not enough data to decode "
			"(%zu vs %zu bytes needed)\n", evt->notif.size,
			sizeof(*be) + sizeof(*bf));
		goto error;
	}
	/* FIXME: make sure beacon frame IEs are fine and that the whole thing
	 * is consistent */
	result = 0;
error:
	return result;
}

/*
 * Handle UWB_RC_EVT_BEACON events
 *
 * We check the beacon cache to see how the received beacon fares. If
 * is there already we refresh the timestamp. If not we create a new
 * entry.
 *
 * According to the WHCI and WUSB specs, only one beacon frame is
 * allowed per notification block, so we don't bother about scanning
 * for more.
 *
 * Returns 1 when the event data is kept (ownership of @be transfers to
 * the cache entry), 0 when the beacon was deliberately dropped, or a
 * negative errno.
 */
int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
{
	int result = -EINVAL;
	struct uwb_rc *rc;
	struct uwb_rc_evt_beacon *be;
	struct uwb_beacon_frame *bf;
	struct uwb_beca_e *bce;
	unsigned long last_ts;

	rc = evt->rc;
	be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
	result = uwb_verify_beacon(rc, evt, be);
	if (result < 0)
		return result;

	/* FIXME: handle alien beacons. */
	if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
	    be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
		return -ENOSYS;
	}

	bf = (struct uwb_beacon_frame *) be->BeaconInfo;

	/*
	 * Drop beacons from devices with a NULL EUI-48 -- they cannot
	 * be uniquely identified.
	 *
	 * It's expected that these will all be WUSB devices and they
	 * have a WUSB specific connection method so ignoring them
	 * here shouldn't be a problem.
	 */
	if (uwb_mac_addr_bcast(&bf->Device_Identifier))
		return 0; /* ignore */

	mutex_lock(&rc->uwb_beca.mutex);
	bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier);
	if (bce == NULL) {
		/* Not in there, a new device is pinging */
		uwb_beacon_print(evt->rc, be, bf);
		bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies);
		if (bce == NULL) {
			mutex_unlock(&rc->uwb_beca.mutex);
			return -ENOMEM;
		}
	}
	mutex_unlock(&rc->uwb_beca.mutex);

	mutex_lock(&bce->mutex);
	/* purge old beacon data */
	kfree(bce->be);
	last_ts = bce->ts_jiffies;

	/* Update commonly used fields */
	bce->ts_jiffies = evt->ts_jiffies;
	bce->be = be;
	bce->dev_addr = bf->hdr.SrcAddr;
	bce->mac_addr = &bf->Device_Identifier;
	/* in-place fixup: convert the wire-endian fields once, here */
	be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
	be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
	stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
	stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);

	/*
	 * This might be a beacon from a new device.
	 */
	if (bce->uwb_dev == NULL)
		uwbd_dev_onair(evt->rc, bce);

	mutex_unlock(&bce->mutex);

	return 1; /* we keep the event data */
}

/*
 * Handle UWB_RC_EVT_BEACON_SIZE events
 *
 * XXXXX
 */
int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
{
	int result = -EINVAL;
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc_evt_beacon_size *bs;

	/* Is there enough data to decode the event? */
	if (evt->notif.size < sizeof(*bs)) {
		dev_err(dev, "BEACON SIZE notification: Not enough data to "
			"decode (%zu vs %zu bytes needed)\n",
			evt->notif.size, sizeof(*bs));
		goto error;
	}
	bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
	if (0)	/* disabled: would log every size change */
		dev_info(dev, "Beacon size changed to %u bytes "
			"(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
	else {
		/* temporary hack until we do something with this message... */
		static unsigned count;
		if (++count % 1000 == 0)
			dev_info(dev, "Beacon size changed %u times "
				"(FIXME: action?)\n", count);
	}
	result = 0;
error:
	return result;
}

/**
 * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
 * @evt: the BP_SLOT_CHANGE notification from the radio controller
 *
 * If the event indicates that no beacon period slots were available
 * then radio controller has transitioned to a non-beaconing state.
 * Otherwise, simply save the current beacon slot.
 */
int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
{
	struct uwb_rc *rc = evt->rc;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_evt_bp_slot_change *bpsc;

	if (evt->notif.size < sizeof(*bpsc)) {
		dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
		return -EINVAL;
	}
	bpsc = container_of(evt->notif.rceb,
			    struct uwb_rc_evt_bp_slot_change, rceb);

	mutex_lock(&rc->uwb_dev.mutex);
	if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
		dev_info(dev, "stopped beaconing: No free slots in BP\n");
		rc->beaconing = -1;
	} else
		rc->uwb_dev.beacon_slot =
			uwb_rc_evt_bp_slot_change_slot_num(bpsc);
	mutex_unlock(&rc->uwb_dev.mutex);

	return 0;
}

/**
 * Handle UWB_RC_EVT_BPOIE_CHANGE events
 *
 * XXXXX
 */
struct uwb_ie_bpo {
	struct uwb_ie_hdr hdr;
	u8                bp_length;
	u8                data[];	/* flexible array of BPOIE payload */
} __attribute__((packed));

int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
{
	int result = -EINVAL;
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc_evt_bpoie_change *bpoiec;
	struct uwb_ie_bpo *bpoie;
	static unsigned count;	/* FIXME: this is a temp hack */
	size_t iesize;

	/* Is there enough data to decode it? */
	if (evt->notif.size < sizeof(*bpoiec)) {
		dev_err(dev, "BPOIEC notification: Not enough data to "
			"decode (%zu vs %zu bytes needed)\n",
			evt->notif.size, sizeof(*bpoiec));
		goto error;
	}
	bpoiec = container_of(evt->notif.rceb,
			      struct uwb_rc_evt_bpoie_change, rceb);
	iesize = le16_to_cpu(bpoiec->wBPOIELength);
	if (iesize < sizeof(*bpoie)) {
		dev_err(dev, "BPOIEC notification: Not enough IE data to "
			"decode (%zu vs %zu bytes needed)\n",
			iesize, sizeof(*bpoie));
		goto error;
	}
	if (++count % 1000 == 0)	/* Lame placeholder */
		dev_info(dev, "BPOIE: %u changes received\n", count);
	/*
	 * FIXME: At this point we should go over all the IEs in the
	 *        bpoiec->BPOIE array and act on each.
	 */
	result = 0;
error:
	return result;
}

/*
 * Print beaconing state.
 */
static ssize_t uwb_rc_beacon_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	struct uwb_rc *rc = uwb_dev->rc;
	ssize_t result;

	mutex_lock(&rc->uwb_dev.mutex);
	result = sprintf(buf, "%d\n", rc->beaconing);
	mutex_unlock(&rc->uwb_dev.mutex);
	return result;
}

/*
 * Start beaconing on the specified channel, or stop beaconing.
 */
static ssize_t uwb_rc_beacon_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	struct uwb_rc *rc = uwb_dev->rc;
	int channel;
	ssize_t result = -EINVAL;

	result = sscanf(buf, "%d", &channel);
	if (result >= 1)
		result = uwb_radio_force_channel(rc, channel);

	return result < 0 ? result : size;
}

/* sysfs "beacon" attribute: read/force the beaconing channel */
DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
gpl-2.0
CyanogenMod/android_kernel_asus_tf300t
arch/parisc/math-emu/sfsub.c
12203
14757
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/sfsub.c $Revision: 1.1 $ * * Purpose: * Single_subtract: subtract two single precision values. * * External Interfaces: * sgl_fsub(leftptr, rightptr, dstptr, status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" /* * Single_subtract: subtract two single precision values. */ int sgl_fsub( sgl_floating_point *leftptr, sgl_floating_point *rightptr, sgl_floating_point *dstptr, unsigned int *status) { register unsigned int left, right, result, extent; register unsigned int signless_upper_left, signless_upper_right, save; register int result_exponent, right_exponent, diff_exponent; register int sign_save, jumpsize; register boolean inexact = FALSE, underflowtrap; /* Create local copies of the numbers */ left = *leftptr; right = *rightptr; /* A zero "save" helps discover equal operands (for later), * * and is used in swapping operands (if needed). 
*/ Sgl_xortointp1(left,right,/*to*/save); /* * check first operand for NaN's or infinity */ if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT) { if (Sgl_iszero_mantissa(left)) { if (Sgl_isnotnan(right)) { if (Sgl_isinfinity(right) && save==0) { /* * invalid since operands are same signed infinity's */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); Set_invalidflag(); Sgl_makequietnan(result); *dstptr = result; return(NOEXCEPTION); } /* * return infinity */ *dstptr = left; return(NOEXCEPTION); } } else { /* * is NaN; signaling or quiet? */ if (Sgl_isone_signaling(left)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(left); } /* * is second operand a signaling NaN? */ else if (Sgl_is_signalingnan(right)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(right); *dstptr = right; return(NOEXCEPTION); } /* * return quiet NaN */ *dstptr = left; return(NOEXCEPTION); } } /* End left NaN or Infinity processing */ /* * check second operand for NaN's or infinity */ if (Sgl_isinfinity_exponent(right)) { if (Sgl_iszero_mantissa(right)) { /* return infinity */ Sgl_invert_sign(right); *dstptr = right; return(NOEXCEPTION); } /* * is NaN; signaling or quiet? */ if (Sgl_isone_signaling(right)) { /* trap if INVALIDTRAP enabled */ if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION); /* make NaN quiet */ Set_invalidflag(); Sgl_set_quiet(right); } /* * return quiet NaN */ *dstptr = right; return(NOEXCEPTION); } /* End right NaN or Infinity processing */ /* Invariant: Must be dealing with finite numbers */ /* Compare operands by removing the sign */ Sgl_copytoint_exponentmantissa(left,signless_upper_left); Sgl_copytoint_exponentmantissa(right,signless_upper_right); /* sign difference selects sub or add operation. 
*/ if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right)) { /* Set the left operand to the larger one by XOR swap * * First finish the first word using "save" */ Sgl_xorfromintp1(save,right,/*to*/right); Sgl_xorfromintp1(save,left,/*to*/left); result_exponent = Sgl_exponent(left); Sgl_invert_sign(left); } /* Invariant: left is not smaller than right. */ if((right_exponent = Sgl_exponent(right)) == 0) { /* Denormalized operands. First look for zeroes */ if(Sgl_iszero_mantissa(right)) { /* right is zero */ if(Sgl_iszero_exponentmantissa(left)) { /* Both operands are zeros */ Sgl_invert_sign(right); if(Is_rounding_mode(ROUNDMINUS)) { Sgl_or_signs(left,/*with*/right); } else { Sgl_and_signs(left,/*with*/right); } } else { /* Left is not a zero and must be the result. Trapped * underflows are signaled if left is denormalized. Result * is always exact. */ if( (result_exponent == 0) && Is_underflowtrap_enabled() ) { /* need to normalize results mantissa */ sign_save = Sgl_signextendedsign(left); Sgl_leftshiftby1(left); Sgl_normalize(left,result_exponent); Sgl_set_sign(left,/*using*/sign_save); Sgl_setwrapped_exponent(left,result_exponent,unfl); *dstptr = left; /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } } *dstptr = left; return(NOEXCEPTION); } /* Neither are zeroes */ Sgl_clear_sign(right); /* Exponent is already cleared */ if(result_exponent == 0 ) { /* Both operands are denormalized. The result must be exact * and is simply calculated. A sum could become normalized and a * difference could cancel to a true zero. 
*/ if( (/*signed*/int) save >= 0 ) { Sgl_subtract(left,/*minus*/right,/*into*/result); if(Sgl_iszero_mantissa(result)) { if(Is_rounding_mode(ROUNDMINUS)) { Sgl_setone_sign(result); } else { Sgl_setzero_sign(result); } *dstptr = result; return(NOEXCEPTION); } } else { Sgl_addition(left,right,/*into*/result); if(Sgl_isone_hidden(result)) { *dstptr = result; return(NOEXCEPTION); } } if(Is_underflowtrap_enabled()) { /* need to normalize result */ sign_save = Sgl_signextendedsign(result); Sgl_leftshiftby1(result); Sgl_normalize(result,result_exponent); Sgl_set_sign(result,/*using*/sign_save); Sgl_setwrapped_exponent(result,result_exponent,unfl); *dstptr = result; /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } *dstptr = result; return(NOEXCEPTION); } right_exponent = 1; /* Set exponent to reflect different bias * with denomalized numbers. */ } else { Sgl_clear_signexponent_set_hidden(right); } Sgl_clear_exponent_set_hidden(left); diff_exponent = result_exponent - right_exponent; /* * Special case alignment of operands that would force alignment * beyond the extent of the extension. A further optimization * could special case this but only reduces the path length for this * infrequent case. */ if(diff_exponent > SGL_THRESHOLD) { diff_exponent = SGL_THRESHOLD; } /* Align right operand by shifting to right */ Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent, /*and lower to*/extent); /* Treat sum and difference of the operands separately. */ if( (/*signed*/int) save >= 0 ) { /* * Difference of the two operands. Their can be no overflow. A * borrow can occur out of the hidden bit and force a post * normalization phase. */ Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result); if(Sgl_iszero_hidden(result)) { /* Handle normalization */ /* A straightforward algorithm would now shift the result * and extension left until the hidden bit becomes one. Not * all of the extension bits need participate in the shift. 
* Only the two most significant bits (round and guard) are * needed. If only a single shift is needed then the guard * bit becomes a significant low order bit and the extension * must participate in the rounding. If more than a single * shift is needed, then all bits to the right of the guard * bit are zeros, and the guard bit may or may not be zero. */ sign_save = Sgl_signextendedsign(result); Sgl_leftshiftby1_withextent(result,extent,result); /* Need to check for a zero result. The sign and exponent * fields have already been zeroed. The more efficient test * of the full object can be used. */ if(Sgl_iszero(result)) /* Must have been "x-x" or "x+(-x)". */ { if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result); *dstptr = result; return(NOEXCEPTION); } result_exponent--; /* Look to see if normalization is finished. */ if(Sgl_isone_hidden(result)) { if(result_exponent==0) { /* Denormalized, exponent should be zero. Left operand * * was normalized, so extent (guard, round) was zero */ goto underflow; } else { /* No further normalization is needed. */ Sgl_set_sign(result,/*using*/sign_save); Ext_leftshiftby1(extent); goto round; } } /* Check for denormalized, exponent should be zero. Left * * operand was normalized, so extent (guard, round) was zero */ if(!(underflowtrap = Is_underflowtrap_enabled()) && result_exponent==0) goto underflow; /* Shift extension to complete one bit of normalization and * update exponent. */ Ext_leftshiftby1(extent); /* Discover first one bit to determine shift amount. Use a * modified binary search. We have already shifted the result * one position right and still not found a one so the remainder * of the extension must be zero and simplifies rounding. 
*/ /* Scan bytes */ while(Sgl_iszero_hiddenhigh7mantissa(result)) { Sgl_leftshiftby8(result); if((result_exponent -= 8) <= 0 && !underflowtrap) goto underflow; } /* Now narrow it down to the nibble */ if(Sgl_iszero_hiddenhigh3mantissa(result)) { /* The lower nibble contains the normalizing one */ Sgl_leftshiftby4(result); if((result_exponent -= 4) <= 0 && !underflowtrap) goto underflow; } /* Select case were first bit is set (already normalized) * otherwise select the proper shift. */ if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7) { /* Already normalized */ if(result_exponent <= 0) goto underflow; Sgl_set_sign(result,/*using*/sign_save); Sgl_set_exponent(result,/*using*/result_exponent); *dstptr = result; return(NOEXCEPTION); } Sgl_sethigh4bits(result,/*using*/sign_save); switch(jumpsize) { case 1: { Sgl_leftshiftby3(result); result_exponent -= 3; break; } case 2: case 3: { Sgl_leftshiftby2(result); result_exponent -= 2; break; } case 4: case 5: case 6: case 7: { Sgl_leftshiftby1(result); result_exponent -= 1; break; } } if(result_exponent > 0) { Sgl_set_exponent(result,/*using*/result_exponent); *dstptr = result; /* Sign bit is already set */ return(NOEXCEPTION); } /* Fixup potential underflows */ underflow: if(Is_underflowtrap_enabled()) { Sgl_set_sign(result,sign_save); Sgl_setwrapped_exponent(result,result_exponent,unfl); *dstptr = result; /* inexact = FALSE */ return(UNDERFLOWEXCEPTION); } /* * Since we cannot get an inexact denormalized result, * we can now return. */ Sgl_right_align(result,/*by*/(1-result_exponent),extent); Sgl_clear_signexponent(result); Sgl_set_sign(result,sign_save); *dstptr = result; return(NOEXCEPTION); } /* end if(hidden...)... */ /* Fall through and round */ } /* end if(save >= 0)... */ else { /* Add magnitudes */ Sgl_addition(left,right,/*to*/result); if(Sgl_isone_hiddenoverflow(result)) { /* Prenormalization required. 
*/ Sgl_rightshiftby1_withextent(result,extent,extent); Sgl_arithrightshiftby1(result); result_exponent++; } /* end if hiddenoverflow... */ } /* end else ...sub magnitudes... */ /* Round the result. If the extension is all zeros,then the result is * exact. Otherwise round in the correct direction. No underflow is * possible. If a postnormalization is necessary, then the mantissa is * all zeros so no shift is needed. */ round: if(Ext_isnotzero(extent)) { inexact = TRUE; switch(Rounding_mode()) { case ROUNDNEAREST: /* The default. */ if(Ext_isone_sign(extent)) { /* at least 1/2 ulp */ if(Ext_isnotzero_lower(extent) || Sgl_isone_lowmantissa(result)) { /* either exactly half way and odd or more than 1/2ulp */ Sgl_increment(result); } } break; case ROUNDPLUS: if(Sgl_iszero_sign(result)) { /* Round up positive results */ Sgl_increment(result); } break; case ROUNDMINUS: if(Sgl_isone_sign(result)) { /* Round down negative results */ Sgl_increment(result); } case ROUNDZERO:; /* truncate is simple */ } /* end switch... */ if(Sgl_isone_hiddenoverflow(result)) result_exponent++; } if(result_exponent == SGL_INFINITY_EXPONENT) { /* Overflow */ if(Is_overflowtrap_enabled()) { Sgl_setwrapped_exponent(result,result_exponent,ovfl); *dstptr = result; if (inexact) if (Is_inexacttrap_enabled()) return(OVERFLOWEXCEPTION | INEXACTEXCEPTION); else Set_inexactflag(); return(OVERFLOWEXCEPTION); } else { Set_overflowflag(); inexact = TRUE; Sgl_setoverflow(result); } } else Sgl_set_exponent(result,result_exponent); *dstptr = result; if(inexact) if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION); else Set_inexactflag(); return(NOEXCEPTION); }
gpl-2.0
GCSAdmin/GCS-SQL-2.0
dbug/tests.c
172
1661
/* A program to test DBUG features. Used by tests-t.pl */

/* Optional DBUG control string supplied via --push1=...; when set, func1()
   applies it with DBUG_PUSH after its first call to func2().  Deliberately
   declared before the includes, matching the original layout. */
char *push1=0;

#include <my_global.h> /* This includes dbug.h */
#include <my_pthread.h>
#include <string.h>

/* Innermost traced function: enters a DBUG frame and returns "ok" or "ko"
   depending on whether the "ret3" debug keyword is active. */
const char *func3()
{
  DBUG_ENTER("func3");
  DBUG_RETURN(DBUG_EVALUATE("ret3", "ok", "ko"));
}

/* Middle traced function: calls func3() and logs its result under the
   "info" keyword. */
void func2()
{
  const char *s;
  DBUG_ENTER("func2");
  s=func3();
  DBUG_PRINT("info", ("s=%s", s));
  DBUG_VOID_RETURN;
}

/* Outer traced function: calls func2(), then optionally installs the
   --push1 control string mid-run (exercising DBUG_PUSH from inside a
   traced frame) and announces it on DBUG_FILE. */
int func1()
{
  DBUG_ENTER("func1");
  func2();
  if (push1)
  {
    DBUG_PUSH(push1);
    fprintf(DBUG_FILE, "=> push1\n");
  }
  DBUG_RETURN(10);
}

/* Driver: each non---push1 argument is treated as a DBUG control string
   and pushed before tracing starts.  Returns 1 when built with DBUG_OFF
   (so the harness can detect a no-debug build), 0 otherwise.  The traced
   output itself is what tests-t.pl verifies. */
int main (int argc, char *argv[])
{
  int i;
#ifdef DBUG_OFF
  return 1;
#endif
  if (argc == 1)
    return 0;
  my_thread_global_init();
  /* Redirect stderr to stdout so trace output is captured in one stream. */
  dup2(1, 2);
  for (i = 1; i < argc; i++)
  {
    if (strncmp(argv[i], "--push1=", 8) == 0)
      push1=argv[i]+8;
    else
      DBUG_PUSH (argv[i]);
  }
  /* Inner block so DBUG_ENTER/DBUG_RETURN pair within one scope. */
  {
    DBUG_ENTER ("main");
    DBUG_PROCESS ("dbug-tests");
    func1();
    /* Each DBUG_EXECUTE_IF below fires only when its keyword was enabled
       via a pushed control string, exercising one DBUG facility apiece. */
    DBUG_EXECUTE_IF("dump",
      {
        char s[1000];
        DBUG_EXPLAIN(s, sizeof(s)-1);
        DBUG_DUMP("dump", (uchar*)s, strlen(s));
      });
    DBUG_EXECUTE_IF("push", DBUG_PUSH("+t"); );
    DBUG_EXECUTE("execute", fprintf(DBUG_FILE, "=> execute\n"); );
    DBUG_EXECUTE_IF("set", DBUG_SET("+F"); );
    fprintf(DBUG_FILE, "=> evaluate: %s\n",
            DBUG_EVALUATE("evaluate", "ON", "OFF"));
    fprintf(DBUG_FILE, "=> evaluate_if: %s\n",
            DBUG_EVALUATE_IF("evaluate_if", "ON", "OFF"));
    DBUG_EXECUTE_IF("pop", DBUG_POP(); );
    /* Log the currently-effective DBUG settings string. */
    {
      char s[1000] __attribute__((unused));
      DBUG_EXPLAIN(s, sizeof(s)-1);
      DBUG_PRINT("explain", ("dbug explained: %s", s));
    }
    func2();
    DBUG_RETURN (0);
  }
}
gpl-2.0
Lydux/gcc-4.6.2-human68k
gcc/testsuite/gcc.target/i386/sse4_1-init-v16qi-1.c
172
1677
/* { dg-do run } */
/* { dg-require-effective-target sse4 } */
/* { dg-options "-O2 -msse4.1" } */

#include "sse4_1-check.h"

#ifdef DEBUG
#include <stdio.h>
#endif

#include <emmintrin.h>

/* Verify that vector X holds v[j] in byte lane j and zero in every other
   lane.  BUG FIX: the loop previously iterated sizeof (v) / sizeof (v[0])
   times, but V is a pointer parameter (the array decayed), so only the
   first 4 or 8 of the 16 lanes were ever inspected; upper lanes could be
   garbage without the test aborting.  Iterate over the union's real array
   instead.  J is cast to unsigned for a clean comparison (callers always
   pass a non-negative lane index).  */
static void
__attribute__((noinline))
check (__m128i x, unsigned char *v, int j)
{
  union
    {
      __m128i x;
      unsigned char i[16];
    } u;
  unsigned int i;

  u.x = x;
  for (i = 0; i < sizeof (u.i) / sizeof (u.i[0]); i++)
    if (i == (unsigned int) j)
      {
	if (v[i] != u.i[i])
	  {
#ifdef DEBUG
	    printf ("%i: 0x%x != 0x%x\n", i, v[i], u.i[i]);
#endif
	    abort ();
	  }
      }
    else if (u.i[i] != 0)
      {
#ifdef DEBUG
	printf ("%i: 0x%x != 0\n", i, u.i[i]);
#endif
	abort ();
      }
}

/* Build a v16qi with a single nonzero byte in each of the 16 lanes in
   turn and verify the result.  _mm_set_epi8 takes arguments from the
   most significant byte (lane 15) down to lane 0.  Previously only lanes
   0-7 were exercised; lanes 8-15 are now covered as well, matching the
   16-lane vector this test is named for.  */
static void
__attribute__((noinline))
test (unsigned char *v)
{
  __m128i x;

  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, v[0]);
  check (x, v, 0);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, v[1], 0);
  check (x, v, 1);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, v[2], 0, 0);
  check (x, v, 2);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, v[3], 0, 0, 0);
  check (x, v, 3);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, v[4], 0, 0, 0, 0);
  check (x, v, 4);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    0, 0, v[5], 0, 0, 0, 0, 0);
  check (x, v, 5);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    0, v[6], 0, 0, 0, 0, 0, 0);
  check (x, v, 6);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, 0,
		    v[7], 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 7);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, 0, v[8],
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 8);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, 0, v[9], 0,
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 9);
  x = _mm_set_epi8 (0, 0, 0, 0, 0, v[10], 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 10);
  x = _mm_set_epi8 (0, 0, 0, 0, v[11], 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 11);
  x = _mm_set_epi8 (0, 0, 0, v[12], 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 12);
  x = _mm_set_epi8 (0, 0, v[13], 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 13);
  x = _mm_set_epi8 (0, v[14], 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 14);
  x = _mm_set_epi8 (v[15], 0, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0);
  check (x, v, 15);
}

/* Entry point invoked by the sse4_1-check.h harness.  */
static void
sse4_1_test (void)
{
  unsigned char v[16] =
    { 0x7B, 0x5B, 0x54, 0x65, 0x73, 0x74, 0x56, 0x65,
      0x63, 0x74, 0x6F, 0x72, 0x5D, 0x53, 0x47, 0x5D };

  test (v);
}
gpl-2.0
israelpz/imx53_linux
drivers/staging/rtl8187se/r8180_wx.c
940
39334
/* This file contains wireless extension handlers. This is part of rtl8180 OpenSource driver. Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it> Released under the terms of GPL (General Public Licence) Parts of this driver are based on the GPL part of the official realtek driver. Parts of this driver are based on the rtl8180 driver skeleton from Patric Schenke & Andres Salomon. Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver. We want to tanks the Authors of those projects and the Ndiswrapper project Authors. */ #include "r8180.h" #include "r8180_hw.h" #include "ieee80211/dot11d.h" //#define RATE_COUNT 4 u32 rtl8180_rates[] = {1000000,2000000,5500000,11000000, 6000000,9000000,12000000,18000000,24000000,36000000,48000000,54000000}; #define RATE_COUNT ARRAY_SIZE(rtl8180_rates) static CHANNEL_LIST DefaultChannelPlan[] = { // {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14}, //Default channel plan {{1,2,3,4,5,6,7,8,9,10,11,36,40,44,48,52,56,60,64},19}, //FCC {{1,2,3,4,5,6,7,8,9,10,11},11}, //IC {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //ETSI {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //Spain. Change to ETSI. {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //France. Change to ETSI. {{14,36,40,44,48,52,56,60,64},9}, //MKK {{1,2,3,4,5,6,7,8,9,10,11,12,13,14, 36,40,44,48,52,56,60,64},22},//MKK1 {{1,2,3,4,5,6,7,8,9,10,11,12,13,36,40,44,48,52,56,60,64},21}, //Israel. {{1,2,3,4,5,6,7,8,9,10,11,12,13,34,38,42,46},17}, // For 11a , TELEC {{1,2,3,4,5,6,7,8,9,10,11,12,13,14},14} //For Global Domain. 1-11:active scan, 12-14 passive scan. 
//+YJ, 080626 }; static int r8180_wx_get_freq(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b); } int r8180_wx_set_key(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { struct r8180_priv *priv = ieee80211_priv(dev); struct iw_point *erq = &(wrqu->encoding); if(priv->ieee80211->bHwRadioOff) return 0; if (erq->flags & IW_ENCODE_DISABLED) { } /* i = erq->flags & IW_ENCODE_INDEX; if (i < 1 || i > 4) */ if (erq->length > 0) { //int len = erq->length <= 5 ? 5 : 13; u32* tkey= (u32*) key; priv->key0[0] = tkey[0]; priv->key0[1] = tkey[1]; priv->key0[2] = tkey[2]; priv->key0[3] = tkey[3] &0xff; DMESG("Setting wep key to %x %x %x %x", tkey[0],tkey[1],tkey[2],tkey[3]); rtl8180_set_hw_wep(dev); } return 0; } static int r8180_wx_set_beaconinterval(struct net_device *dev, struct iw_request_info *aa, union iwreq_data *wrqu, char *b) { int *parms = (int *)b; int bi = parms[0]; struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); DMESG("setting beacon interval to %x",bi); priv->ieee80211->current_network.beacon_interval=bi; rtl8180_commit(dev); up(&priv->wx_sem); return 0; } static int r8180_wx_get_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_mode(priv->ieee80211,a,wrqu,b); } static int r8180_wx_get_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_rate(priv->ieee80211,info,wrqu,extra); } static int r8180_wx_set_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; 
down(&priv->wx_sem); ret = ieee80211_wx_set_rate(priv->ieee80211,info,wrqu,extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_crcmon(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int *parms = (int *)extra; int enable = (parms[0] > 0); short prev = priv->crcmon; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if(enable) priv->crcmon=1; else priv->crcmon=0; DMESG("bad CRC in monitor mode are %s", priv->crcmon ? "accepted" : "rejected"); if(prev != priv->crcmon && priv->up){ rtl8180_down(dev); rtl8180_up(dev); } up(&priv->wx_sem); return 0; } static int r8180_wx_set_mode(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); // printk("set mode ENABLE_IPS\n"); if(priv->bInactivePs){ if(wrqu->mode == IW_MODE_ADHOC) IPSLeave(dev); } ret = ieee80211_wx_set_mode(priv->ieee80211,a,wrqu,b); //rtl8180_commit(dev); up(&priv->wx_sem); return ret; } //YJ,add,080819,for hidden ap struct iw_range_with_scan_capa { /* Informative stuff (to choose between different interface) */ __u32 throughput; /* To give an idea... */ /* In theory this value should be the maximum benchmarked * TCP/IP throughput, because with most of these devices the * bit rate is meaningless (overhead an co) to estimate how * fast the connection will go and pick the fastest one. * I suggest people to play with Netperf or any benchmark... 
*/ /* NWID (or domain id) */ __u32 min_nwid; /* Minimal NWID we are able to set */ __u32 max_nwid; /* Maximal NWID we are able to set */ /* Old Frequency (backward compat - moved lower ) */ __u16 old_num_channels; __u8 old_num_frequency; /* Scan capabilities */ __u8 scan_capa; }; //YJ,add,080819,for hidden ap static int rtl8180_wx_get_range(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_range *range = (struct iw_range *)extra; struct r8180_priv *priv = ieee80211_priv(dev); u16 val; int i; //struct iw_range_with_scan_capa* tmp = (struct iw_range_with_scan_capa*)range; //YJ,add,080819,for hidden ap wrqu->data.length = sizeof(*range); memset(range, 0, sizeof(*range)); /* Let's try to keep this struct in the same order as in * linux/include/wireless.h */ /* TODO: See what values we can set, and remove the ones we can't * set, or fill them with some default data. */ /* ~5 Mb/s real (802.11b) */ range->throughput = 5 * 1000 * 1000; // TODO: Not used in 802.11b? // range->min_nwid; /* Minimal NWID we are able to set */ // TODO: Not used in 802.11b? 
// range->max_nwid; /* Maximal NWID we are able to set */ /* Old Frequency (backward compat - moved lower ) */ // range->old_num_channels; // range->old_num_frequency; // range->old_freq[6]; /* Filler to keep "version" at the same offset */ if(priv->rf_set_sens != NULL) range->sensitivity = priv->max_sens; /* signal level threshold range */ range->max_qual.qual = 100; /* TODO: Find real max RSSI and stick here */ range->max_qual.level = 0; range->max_qual.noise = -98; range->max_qual.updated = 7; /* Updated all three */ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ range->avg_qual.level = 20 + -98; range->avg_qual.noise = 0; range->avg_qual.updated = 7; /* Updated all three */ range->num_bitrates = RATE_COUNT; for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) { range->bitrate[i] = rtl8180_rates[i]; } range->min_frag = MIN_FRAG_THRESHOLD; range->max_frag = MAX_FRAG_THRESHOLD; range->pm_capa = 0; range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 16; // range->retry_capa; /* What retry options are supported */ // range->retry_flags; /* How to decode max/min retry limit */ // range->r_time_flags; /* How to decode max/min retry life */ // range->min_retry; /* Minimal number of retries */ // range->max_retry; /* Maximal number of retries */ // range->min_r_time; /* Minimal retry lifetime */ // range->max_r_time; /* Maximal retry lifetime */ range->num_channels = 14; for (i = 0, val = 0; i < 14; i++) { // Include only legal frequencies for some countries if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) { range->freq[val].i = i + 1; range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000; range->freq[val].e = 1; val++; } else { // FIXME: do we need to set anything for channels // we don't use ? 
} if (val == IW_MAX_FREQUENCIES) break; } range->num_frequency = val; range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; //tmp->scan_capa = 0x01; //YJ,add,080819,for hidden ap return 0; } static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; struct ieee80211_device* ieee = priv->ieee80211; if(priv->ieee80211->bHwRadioOff) return 0; //YJ,add,080819, for hidden ap //printk("==*&*&*&==>%s in\n", __func__); //printk("=*&*&*&*===>flag:%x, %x\n", wrqu->data.flags, IW_SCAN_THIS_ESSID); if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { struct iw_scan_req* req = (struct iw_scan_req*)b; if (req->essid_len) { //printk("==**&*&*&**===>scan set ssid:%s\n", req->essid); ieee->current_network.ssid_len = req->essid_len; memcpy(ieee->current_network.ssid, req->essid, req->essid_len); //printk("=====>network ssid:%s\n", ieee->current_network.ssid); } } //YJ,add,080819, for hidden ap, end down(&priv->wx_sem); if(priv->up){ // printk("set scan ENABLE_IPS\n"); priv->ieee80211->actscanning = true; if(priv->bInactivePs && (priv->ieee80211->state != IEEE80211_LINKED)){ IPSLeave(dev); // down(&priv->ieee80211->wx_sem); // if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || !(priv->ieee80211->proto_started)){ // ret = -1; // up(&priv->ieee80211->wx_sem); // up(&priv->wx_sem); // return ret; // } // queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq); //printk("start scan============================>\n"); ieee80211_softmac_ips_scan_syncro(priv->ieee80211); //ieee80211_rtl_start_scan(priv->ieee80211); /* intentionally forget to up sem */ // up(&priv->ieee80211->wx_sem); ret = 0; } else { //YJ,add,080828, prevent scan in BusyTraffic //FIXME: Need to consider last scan time if ((priv->link_detect.bBusyTraffic) && (true)) { ret = 0; printk("Now traffic is busy, please try later!\n"); } else //YJ,add,080828, prevent 
scan in BusyTraffic,end ret = ieee80211_wx_set_scan(priv->ieee80211,a,wrqu,b); } } else ret = -1; up(&priv->wx_sem); return ret; } static int r8180_wx_get_scan(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); if(priv->up) ret = ieee80211_wx_get_scan(priv->ieee80211,a,wrqu,b); else ret = -1; up(&priv->wx_sem); return ret; } static int r8180_wx_set_essid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); //printk("set essid ENABLE_IPS\n"); if(priv->bInactivePs) IPSLeave(dev); // printk("haha:set essid %s essid_len = %d essid_flgs = %d\n",b, wrqu->essid.length, wrqu->essid.flags); ret = ieee80211_wx_set_essid(priv->ieee80211,a,wrqu,b); up(&priv->wx_sem); return ret; } static int r8180_wx_get_essid(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b); up(&priv->wx_sem); return ret; } static int r8180_wx_set_freq(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b); up(&priv->wx_sem); return ret; } static int r8180_wx_get_name(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra); } static int r8180_wx_set_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 
0; if (wrqu->frag.disabled) priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD; else { if (wrqu->frag.value < MIN_FRAG_THRESHOLD || wrqu->frag.value > MAX_FRAG_THRESHOLD) return -EINVAL; priv->ieee80211->fts = wrqu->frag.value & ~0x1; } return 0; } static int r8180_wx_get_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); wrqu->frag.value = priv->ieee80211->fts; wrqu->frag.fixed = 0; /* no auto select */ wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD); return 0; } static int r8180_wx_set_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_wap(priv->ieee80211,info,awrq,extra); up(&priv->wx_sem); return ret; } static int r8180_wx_get_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_wap(priv->ieee80211,info,wrqu,extra); } static int r8180_wx_set_enc(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if(priv->hw_wep) ret = r8180_wx_set_key(dev,info,wrqu,key); else{ DMESG("Setting SW wep key"); ret = ieee80211_wx_set_encode(priv->ieee80211,info,wrqu,key); } up(&priv->wx_sem); return ret; } static int r8180_wx_get_enc(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { struct r8180_priv *priv = ieee80211_priv(dev); return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key); } static int r8180_wx_set_scan_type(struct net_device *dev, struct iw_request_info *aa, union iwreq_data *wrqu, char *p){ struct r8180_priv *priv = ieee80211_priv(dev); int *parms=(int*)p; int 
mode=parms[0]; if(priv->ieee80211->bHwRadioOff) return 0; priv->ieee80211->active_scan = mode; return 1; } /* added by christian */ /* static int r8180_wx_set_monitor_type(struct net_device *dev, struct iw_request_info *aa, union iwreq_data *wrqu, char *p){ struct r8180_priv *priv = ieee80211_priv(dev); int *parms=(int*)p; int mode=parms[0]; if(priv->ieee80211->iw_mode != IW_MODE_MONITOR) return -1; priv->prism_hdr = mode; if(!mode)dev->type=ARPHRD_IEEE80211; else dev->type=ARPHRD_IEEE80211_PRISM; DMESG("using %s RX encap", mode ? "AVS":"80211"); return 0; } */ //of r8180_wx_set_monitor_type /* end added christian */ static int r8180_wx_set_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int err = 0; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled){ err = -EINVAL; goto exit; } if (!(wrqu->retry.flags & IW_RETRY_LIMIT)){ err = -EINVAL; goto exit; } if(wrqu->retry.value > R8180_MAX_RETRY){ err= -EINVAL; goto exit; } if (wrqu->retry.flags & IW_RETRY_MAX) { priv->retry_rts = wrqu->retry.value; DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value); }else { priv->retry_data = wrqu->retry.value; DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value); } /* FIXME ! * We might try to write directly the TX config register * or to restart just the (R)TX process. 
* I'm unsure if whole reset is really needed */ rtl8180_commit(dev); /* if(priv->up){ rtl8180_rtx_disable(dev); rtl8180_rx_enable(dev); rtl8180_tx_enable(dev); } */ exit: up(&priv->wx_sem); return err; } static int r8180_wx_get_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); wrqu->retry.disabled = 0; /* can't be disabled */ if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) return -EINVAL; if (wrqu->retry.flags & IW_RETRY_MAX) { wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MAX; wrqu->retry.value = priv->retry_rts; } else { wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MIN; wrqu->retry.value = priv->retry_data; } //DMESG("returning %d",wrqu->retry.value); return 0; } static int r8180_wx_get_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); if(priv->rf_set_sens == NULL) return -1; /* we have not this support for this radio */ wrqu->sens.value = priv->sens; return 0; } static int r8180_wx_set_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); short err = 0; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); //DMESG("attempt to set sensivity to %ddb",wrqu->sens.value); if(priv->rf_set_sens == NULL) { err= -1; /* we have not this support for this radio */ goto exit; } if(priv->rf_set_sens(dev, wrqu->sens.value) == 0) priv->sens = wrqu->sens.value; else err= -EINVAL; exit: up(&priv->wx_sem); return err; } static int r8180_wx_set_rawtx(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int ret; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int 
r8180_wx_get_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); ret = ieee80211_wx_get_power(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret; struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); printk("=>>>>>>>>>>=============================>set power:%d,%d!\n",wrqu->power.disabled, wrqu->power.flags); if (wrqu->power.disabled==0) { wrqu->power.flags|=IW_POWER_ALL_R; wrqu->power.flags|=IW_POWER_TIMEOUT; wrqu->power.value =1000; } ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; if (wrqu->rts.disabled) priv->rts = DEFAULT_RTS_THRESHOLD; else { if (wrqu->rts.value < MIN_RTS_THRESHOLD || wrqu->rts.value > MAX_RTS_THRESHOLD) return -EINVAL; priv->rts = wrqu->rts.value; } return 0; } static int r8180_wx_get_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); wrqu->rts.value = priv->rts; wrqu->rts.fixed = 0; /* no auto select */ wrqu->rts.disabled = (wrqu->rts.value == 0); return 0; } static int dummy(struct net_device *dev, struct iw_request_info *a, union iwreq_data *wrqu,char *b) { return -1; } /* static int r8180_wx_get_psmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee; int ret = 0; down(&priv->wx_sem); if(priv) { ieee = priv->ieee80211; if(ieee->ps == 
IEEE80211_PS_DISABLED) { *((unsigned int *)extra) = IEEE80211_PS_DISABLED; goto exit; } *((unsigned int *)extra) = IW_POWER_TIMEOUT; if (ieee->ps & IEEE80211_PS_MBCAST) *((unsigned int *)extra) |= IW_POWER_ALL_R; else *((unsigned int *)extra) |= IW_POWER_UNICAST_R; } else ret = -1; exit: up(&priv->wx_sem); return ret; } static int r8180_wx_set_psmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); //struct ieee80211_device *ieee; int ret = 0; down(&priv->wx_sem); ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } */ static int r8180_wx_get_iwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee; int ret = 0; down(&priv->wx_sem); ieee = priv->ieee80211; strcpy(extra, "802.11"); if(ieee->modulation & IEEE80211_CCK_MODULATION) { strcat(extra, "b"); if(ieee->modulation & IEEE80211_OFDM_MODULATION) strcat(extra, "/g"); } else if(ieee->modulation & IEEE80211_OFDM_MODULATION) strcat(extra, "g"); up(&priv->wx_sem); return ret; } static int r8180_wx_set_iwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device *ieee = priv->ieee80211; int *param = (int *)extra; int ret = 0; int modulation = 0, mode = 0; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (*param == 1) { modulation |= IEEE80211_CCK_MODULATION; mode = IEEE_B; printk(KERN_INFO "B mode!\n"); } else if (*param == 2) { modulation |= IEEE80211_OFDM_MODULATION; mode = IEEE_G; printk(KERN_INFO "G mode!\n"); } else if (*param == 3) { modulation |= IEEE80211_CCK_MODULATION; modulation |= IEEE80211_OFDM_MODULATION; mode = IEEE_B|IEEE_G; printk(KERN_INFO "B/G mode!\n"); } if(ieee->proto_started) { ieee80211_stop_protocol(ieee); ieee->mode 
= mode; ieee->modulation = modulation; ieee80211_start_protocol(ieee); } else { ieee->mode = mode; ieee->modulation = modulation; // ieee80211_start_protocol(ieee); } up(&priv->wx_sem); return ret; } static int r8180_wx_get_preamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); *extra = (char) priv->plcp_preamble_mode; // 0:auto 1:short 2:long up(&priv->wx_sem); return 0; } static int r8180_wx_set_preamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); int ret = 0; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); if (*extra<0||*extra>2) ret = -1; else priv->plcp_preamble_mode = *((short *)extra) ; up(&priv->wx_sem); return ret; } static int r8180_wx_get_siglevel(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); //struct ieee80211_network *network = &(priv->ieee80211->current_network); int ret = 0; down(&priv->wx_sem); // Modify by hikaru 6.5 *((int *)extra) = priv->wstats.qual.level;//for interface test ,it should be the priv->wstats.qual.level; up(&priv->wx_sem); return ret; } static int r8180_wx_get_sigqual(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); //struct ieee80211_network *network = &(priv->ieee80211->current_network); int ret = 0; down(&priv->wx_sem); // Modify by hikaru 6.5 *((int *)extra) = priv->wstats.qual.qual;//for interface test ,it should be the priv->wstats.qual.qual; up(&priv->wx_sem); return ret; } static int r8180_wx_reset_stats(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv =ieee80211_priv(dev); down(&priv->wx_sem); priv->stats.txrdu = 0; priv->stats.rxrdu = 0; 
priv->stats.rxnolast = 0; priv->stats.rxnodata = 0; priv->stats.rxnopointer = 0; priv->stats.txnperr = 0; priv->stats.txresumed = 0; priv->stats.rxerr = 0; priv->stats.rxoverflow = 0; priv->stats.rxint = 0; priv->stats.txnpokint = 0; priv->stats.txhpokint = 0; priv->stats.txhperr = 0; priv->stats.ints = 0; priv->stats.shints = 0; priv->stats.txoverflow = 0; priv->stats.rxdmafail = 0; priv->stats.txbeacon = 0; priv->stats.txbeaconerr = 0; priv->stats.txlpokint = 0; priv->stats.txlperr = 0; priv->stats.txretry =0;//20060601 priv->stats.rxcrcerrmin=0; priv->stats.rxcrcerrmid=0; priv->stats.rxcrcerrmax=0; priv->stats.rxicverr=0; up(&priv->wx_sem); return 0; } static int r8180_wx_radio_on(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv =ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); priv->rf_wakeup(dev); up(&priv->wx_sem); return 0; } static int r8180_wx_radio_off(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv =ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); priv->rf_sleep(dev); up(&priv->wx_sem); return 0; } static int r8180_wx_get_channelplan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); down(&priv->wx_sem); *extra = priv->channel_plan; up(&priv->wx_sem); return 0; } static int r8180_wx_set_channelplan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); //struct ieee80211_device *ieee = netdev_priv(dev); int *val = (int *)extra; int i; printk("-----in fun %s\n", __func__); if(priv->ieee80211->bHwRadioOff) return 0; //unsigned long flags; down(&priv->wx_sem); if (DefaultChannelPlan[*val].Len != 0){ priv ->channel_plan = *val; // Clear old channel map for 
(i=1;i<=MAX_CHANNEL_NUMBER;i++) { GET_DOT11D_INFO(priv->ieee80211)->channel_map[i] = 0; } // Set new channel map for (i=1;i<=DefaultChannelPlan[*val].Len;i++) { GET_DOT11D_INFO(priv->ieee80211)->channel_map[DefaultChannelPlan[*val].Channel[i-1]] = 1; } } up(&priv->wx_sem); return 0; } static int r8180_wx_get_version(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); //struct ieee80211_device *ieee; down(&priv->wx_sem); strcpy(extra, "1020.0808"); up(&priv->wx_sem); return 0; } //added by amy 080818 //receive datarate from user typing valid rate is from 2 to 108 (1 - 54M), if input 0, return to normal rate adaptive. static int r8180_wx_set_forcerate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); u8 forcerate = *extra; down(&priv->wx_sem); printk("==============>%s(): forcerate is %d\n",__func__,forcerate); if((forcerate == 2) || (forcerate == 4) || (forcerate == 11) || (forcerate == 22) || (forcerate == 12) || (forcerate == 18) || (forcerate == 24) || (forcerate == 36) || (forcerate == 48) || (forcerate == 72) || (forcerate == 96) || (forcerate == 108)) { priv->ForcedDataRate = 1; priv->ieee80211->rate = forcerate * 5; } else if(forcerate == 0) { priv->ForcedDataRate = 0; printk("OK! 
return rate adaptive\n"); } else printk("ERR: wrong rate\n"); up(&priv->wx_sem); return 0; } static int r8180_wx_set_enc_ext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct r8180_priv *priv = ieee80211_priv(dev); //printk("===>%s()\n", __func__); int ret=0; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { //printk("====>%s()\n", __func__); struct r8180_priv *priv = ieee80211_priv(dev); int ret=0; if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); ret = ieee80211_wx_set_auth(priv->ieee80211, info, &wrqu->param, extra); up(&priv->wx_sem); return ret; } static int r8180_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { //printk("====>%s()\n", __func__); int ret=0; struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); #if 1 ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra); #endif up(&priv->wx_sem); return ret; } static int r8180_wx_set_gen_ie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { // printk("====>%s(), len:%d\n", __func__, data->length); int ret=0; struct r8180_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->bHwRadioOff) return 0; down(&priv->wx_sem); #if 1 ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length); #endif up(&priv->wx_sem); //printk("<======%s(), ret:%d\n", __func__, ret); return ret; } static iw_handler r8180_wx_handlers[] = { NULL, /* SIOCSIWCOMMIT */ r8180_wx_get_name, /* SIOCGIWNAME */ dummy, /* SIOCSIWNWID */ dummy, /* SIOCGIWNWID */ r8180_wx_set_freq, /* SIOCSIWFREQ */ r8180_wx_get_freq, /* SIOCGIWFREQ */ r8180_wx_set_mode, /* SIOCSIWMODE */ 
r8180_wx_get_mode, /* SIOCGIWMODE */ r8180_wx_set_sens, /* SIOCSIWSENS */ r8180_wx_get_sens, /* SIOCGIWSENS */ NULL, /* SIOCSIWRANGE */ rtl8180_wx_get_range, /* SIOCGIWRANGE */ NULL, /* SIOCSIWPRIV */ NULL, /* SIOCGIWPRIV */ NULL, /* SIOCSIWSTATS */ NULL, /* SIOCGIWSTATS */ dummy, /* SIOCSIWSPY */ dummy, /* SIOCGIWSPY */ NULL, /* SIOCGIWTHRSPY */ NULL, /* SIOCWIWTHRSPY */ r8180_wx_set_wap, /* SIOCSIWAP */ r8180_wx_get_wap, /* SIOCGIWAP */ r8180_wx_set_mlme, /* SIOCSIWMLME*/ dummy, /* SIOCGIWAPLIST -- depricated */ r8180_wx_set_scan, /* SIOCSIWSCAN */ r8180_wx_get_scan, /* SIOCGIWSCAN */ r8180_wx_set_essid, /* SIOCSIWESSID */ r8180_wx_get_essid, /* SIOCGIWESSID */ dummy, /* SIOCSIWNICKN */ dummy, /* SIOCGIWNICKN */ NULL, /* -- hole -- */ NULL, /* -- hole -- */ r8180_wx_set_rate, /* SIOCSIWRATE */ r8180_wx_get_rate, /* SIOCGIWRATE */ r8180_wx_set_rts, /* SIOCSIWRTS */ r8180_wx_get_rts, /* SIOCGIWRTS */ r8180_wx_set_frag, /* SIOCSIWFRAG */ r8180_wx_get_frag, /* SIOCGIWFRAG */ dummy, /* SIOCSIWTXPOW */ dummy, /* SIOCGIWTXPOW */ r8180_wx_set_retry, /* SIOCSIWRETRY */ r8180_wx_get_retry, /* SIOCGIWRETRY */ r8180_wx_set_enc, /* SIOCSIWENCODE */ r8180_wx_get_enc, /* SIOCGIWENCODE */ r8180_wx_set_power, /* SIOCSIWPOWER */ r8180_wx_get_power, /* SIOCGIWPOWER */ NULL, /*---hole---*/ NULL, /*---hole---*/ r8180_wx_set_gen_ie, /* SIOCSIWGENIE */ NULL, /* SIOCSIWGENIE */ r8180_wx_set_auth, /* SIOCSIWAUTH */ NULL, /* SIOCSIWAUTH */ r8180_wx_set_enc_ext, /* SIOCSIWENCODEEXT */ NULL, /* SIOCSIWENCODEEXT */ NULL, /* SIOCSIWPMKSA */ NULL, /*---hole---*/ }; static const struct iw_priv_args r8180_private_args[] = { { SIOCIWFIRSTPRIV + 0x0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc" }, { SIOCIWFIRSTPRIV + 0x1, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beaconint" }, { SIOCIWFIRSTPRIV + 0x3, 0, 0, "dummy" }, /* added by christian */ //{ // SIOCIWFIRSTPRIV + 0x2, // IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "prismhdr" //}, /* 
end added by christian */ { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan" }, { SIOCIWFIRSTPRIV + 0x5, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx" }, { SIOCIWFIRSTPRIV + 0x7, 0, 0, "dummy" }, // { // SIOCIWFIRSTPRIV + 0x5, // 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpsmode" // }, // { // SIOCIWFIRSTPRIV + 0x6, // IW_PRIV_SIZE_FIXED, 0, "setpsmode" // }, //set/get mode have been realized in public handlers { SIOCIWFIRSTPRIV + 0x8, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setiwmode" }, { SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 32, "getiwmode" }, { SIOCIWFIRSTPRIV + 0xA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setpreamble" }, { SIOCIWFIRSTPRIV + 0xB, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpreamble" }, { SIOCIWFIRSTPRIV + 0xC, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0xD, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getrssi" }, { SIOCIWFIRSTPRIV + 0xE, 0, 0, "dummy" }, { SIOCIWFIRSTPRIV + 0xF, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getlinkqual" }, { SIOCIWFIRSTPRIV + 0x10, 0, 0, "resetstats" }, { SIOCIWFIRSTPRIV + 0x11, 0,0, "dummy" }, { SIOCIWFIRSTPRIV + 0x12, 0, 0, "radioon" }, { SIOCIWFIRSTPRIV + 0x13, 0, 0, "radiooff" }, { SIOCIWFIRSTPRIV + 0x14, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setchannel" }, { SIOCIWFIRSTPRIV + 0x15, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getchannel" }, { SIOCIWFIRSTPRIV + 0x16, 0,0, "dummy" }, { SIOCIWFIRSTPRIV + 0x17, 0,IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 32, "getversion" }, { SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setrate" }, }; static iw_handler r8180_private_handler[] = { r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/ dummy, r8180_wx_set_beaconinterval, dummy, //r8180_wx_set_monitor_type, r8180_wx_set_scan_type, dummy, r8180_wx_set_rawtx, dummy, r8180_wx_set_iwmode, r8180_wx_get_iwmode, r8180_wx_set_preamble, r8180_wx_get_preamble, dummy, 
r8180_wx_get_siglevel, dummy, r8180_wx_get_sigqual, r8180_wx_reset_stats, dummy,//r8180_wx_get_stats r8180_wx_radio_on, r8180_wx_radio_off, r8180_wx_set_channelplan, r8180_wx_get_channelplan, dummy, r8180_wx_get_version, r8180_wx_set_forcerate, }; static inline int is_same_network(struct ieee80211_network *src, struct ieee80211_network *dst, struct ieee80211_device *ieee) { /* A network is only a duplicate if the channel, BSSID, ESSID * and the capability field (in particular IBSS and BSS) all match. * We treat all <hidden> with the same BSSID and channel * as one network */ return (((src->ssid_len == dst->ssid_len)||(ieee->iw_mode == IW_MODE_INFRA)) && //YJ,mod, 080819,for hidden ap //((src->ssid_len == dst->ssid_len) && (src->channel == dst->channel) && !memcmp(src->bssid, dst->bssid, ETH_ALEN) && (!memcmp(src->ssid, dst->ssid, src->ssid_len)||(ieee->iw_mode == IW_MODE_INFRA)) && //YJ,mod, 080819,for hidden ap //!memcmp(src->ssid, dst->ssid, src->ssid_len) && ((src->capability & WLAN_CAPABILITY_IBSS) == (dst->capability & WLAN_CAPABILITY_IBSS)) && ((src->capability & WLAN_CAPABILITY_BSS) == (dst->capability & WLAN_CAPABILITY_BSS))); } //WB modefied to show signal to GUI on 18-01-2008 static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev) { struct r8180_priv *priv = ieee80211_priv(dev); struct ieee80211_device* ieee = priv->ieee80211; struct iw_statistics* wstats = &priv->wstats; //struct ieee80211_network* target = NULL; int tmp_level = 0; int tmp_qual = 0; int tmp_noise = 0; //unsigned long flag; if (ieee->state < IEEE80211_LINKED) { wstats->qual.qual = 0; wstats->qual.level = 0; wstats->qual.noise = 0; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; return wstats; } tmp_level = (&ieee->current_network)->stats.signal; tmp_qual = (&ieee->current_network)->stats.signalstrength; tmp_noise = (&ieee->current_network)->stats.noise; //printk("level:%d, qual:%d, noise:%d\n", tmp_level, tmp_qual, tmp_noise); // printk("level:%d\n", 
tmp_level); wstats->qual.level = tmp_level; wstats->qual.qual = tmp_qual; wstats->qual.noise = tmp_noise; wstats->qual.updated = IW_QUAL_ALL_UPDATED| IW_QUAL_DBM; return wstats; } struct iw_handler_def r8180_wx_handlers_def={ .standard = r8180_wx_handlers, .num_standard = ARRAY_SIZE(r8180_wx_handlers), .private = r8180_private_handler, .num_private = ARRAY_SIZE(r8180_private_handler), .num_private_args = sizeof(r8180_private_args) / sizeof(struct iw_priv_args), .get_wireless_stats = r8180_get_wireless_stats, .private_args = (struct iw_priv_args *)r8180_private_args, };
gpl-2.0
TeamFahQ/kernel_linux_next
arch/x86/kernel/kprobes/opt.c
940
12378
/* * Kernel Probes Jump Optimization (Optprobes) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2002, 2004 * Copyright (C) Hitachi Ltd., 2012 */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/hardirq.h> #include <linux/preempt.h> #include <linux/module.h> #include <linux/kdebug.h> #include <linux/kallsyms.h> #include <linux/ftrace.h> #include <asm/cacheflush.h> #include <asm/desc.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/alternative.h> #include <asm/insn.h> #include <asm/debugreg.h> #include "common.h" unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) { struct optimized_kprobe *op; struct kprobe *kp; long offs; int i; for (i = 0; i < RELATIVEJUMP_SIZE; i++) { kp = get_kprobe((void *)addr - i); /* This function only handles jump-optimized kprobe */ if (kp && kprobe_optimized(kp)) { op = container_of(kp, struct optimized_kprobe, kp); /* If op->list is not empty, op is under optimizing */ if (list_empty(&op->list)) goto found; } } return addr; found: /* * If the kprobe can be optimized, original bytes which can be * overwritten by jump destination address. In this case, original * bytes must be recovered from op->optinsn.copied_insn buffer. 
*/ memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); if (addr == (unsigned long)kp->addr) { buf[0] = kp->opcode; memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); } else { offs = addr - (unsigned long)kp->addr - 1; memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs); } return (unsigned long)buf; } /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) { #ifdef CONFIG_X86_64 *addr++ = 0x48; *addr++ = 0xbf; #else *addr++ = 0xb8; #endif *(unsigned long *)addr = val; } asm ( ".global optprobe_template_entry\n" "optprobe_template_entry:\n" #ifdef CONFIG_X86_64 /* We don't bother saving the ss register */ " pushq %rsp\n" " pushfq\n" SAVE_REGS_STRING " movq %rsp, %rsi\n" ".global optprobe_template_val\n" "optprobe_template_val:\n" ASM_NOP5 ASM_NOP5 ".global optprobe_template_call\n" "optprobe_template_call:\n" ASM_NOP5 /* Move flags to rsp */ " movq 144(%rsp), %rdx\n" " movq %rdx, 152(%rsp)\n" RESTORE_REGS_STRING /* Skip flags entry */ " addq $8, %rsp\n" " popfq\n" #else /* CONFIG_X86_32 */ " pushf\n" SAVE_REGS_STRING " movl %esp, %edx\n" ".global optprobe_template_val\n" "optprobe_template_val:\n" ASM_NOP5 ".global optprobe_template_call\n" "optprobe_template_call:\n" ASM_NOP5 RESTORE_REGS_STRING " addl $4, %esp\n" /* skip cs */ " popf\n" #endif ".global optprobe_template_end\n" "optprobe_template_end:\n"); #define TMPL_MOVE_IDX \ ((long)&optprobe_template_val - (long)&optprobe_template_entry) #define TMPL_CALL_IDX \ ((long)&optprobe_template_call - (long)&optprobe_template_entry) #define TMPL_END_IDX \ ((long)&optprobe_template_end - (long)&optprobe_template_entry) #define INT3_SIZE sizeof(kprobe_opcode_t) /* Optimized kprobe call back function: called from optinsn */ static void optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long flags; /* This is 
possible if op is under delayed unoptimizing */ if (kprobe_disabled(&op->kp)) return; local_irq_save(flags); if (kprobe_running()) { kprobes_inc_nmissed_count(&op->kp); } else { /* Save skipped registers */ #ifdef CONFIG_X86_64 regs->cs = __KERNEL_CS; #else regs->cs = __KERNEL_CS | get_kernel_rpl(); regs->gs = 0; #endif regs->ip = (unsigned long)op->kp.addr + INT3_SIZE; regs->orig_ax = ~0UL; __this_cpu_write(current_kprobe, &op->kp); kcb->kprobe_status = KPROBE_HIT_ACTIVE; opt_pre_handler(&op->kp, regs); __this_cpu_write(current_kprobe, NULL); } local_irq_restore(flags); } NOKPROBE_SYMBOL(optimized_callback); static int copy_optimized_instructions(u8 *dest, u8 *src) { int len = 0, ret; while (len < RELATIVEJUMP_SIZE) { ret = __copy_instruction(dest + len, src + len); if (!ret || !can_boost(dest + len)) return -EINVAL; len += ret; } /* Check whether the address range is reserved */ if (ftrace_text_reserved(src, src + len - 1) || alternatives_text_reserved(src, src + len - 1) || jump_label_text_reserved(src, src + len - 1)) return -EBUSY; return len; } /* Check whether insn is indirect jump */ static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ insn->opcode.bytes[0] == 0xea); /* Segment based jump */ } /* Check whether insn jumps into specified address range */ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) { unsigned long target = 0; switch (insn->opcode.bytes[0]) { case 0xe0: /* loopne */ case 0xe1: /* loope */ case 0xe2: /* loop */ case 0xe3: /* jcxz */ case 0xe9: /* near relative jump */ case 0xeb: /* short relative jump */ break; case 0x0f: if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */ break; return 0; default: if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */ break; return 0; } target = (unsigned long)insn->next_byte + insn->immediate.value; return (start <= target && target <= start + len); } /* Decode 
whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { unsigned long addr, size = 0, offset = 0; struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; /* Lookup symbol including addr */ if (!kallsyms_lookup_size_offset(paddr, &size, &offset)) return 0; /* * Do not optimize in the entry code due to the unstable * stack handling. */ if ((paddr >= (unsigned long)__entry_text_start) && (paddr < (unsigned long)__entry_text_end)) return 0; /* Check there is enough space for a relative jump. */ if (size - offset < RELATIVEJUMP_SIZE) return 0; /* Decode instructions */ addr = paddr - offset; while (addr < paddr - offset + size) { /* Decode until function end */ unsigned long recovered_insn; if (search_exception_tables(addr)) /* * Since some fixup code will jumps into this function, * we can't optimize kprobe in this function. */ return 0; recovered_insn = recover_probed_instruction(buf, addr); if (!recovered_insn) return 0; kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint */ if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) return 0; /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); /* Check any instructions don't jump into target */ if (insn_is_indirect_jump(&insn) || insn_jump_into_range(&insn, paddr + INT3_SIZE, RELATIVE_ADDR_SIZE)) return 0; addr += insn.length; } return 1; } /* Check optimized_kprobe can actually be optimized. */ int arch_check_optimized_kprobe(struct optimized_kprobe *op) { int i; struct kprobe *p; for (i = 1; i < op->optinsn.size; i++) { p = get_kprobe(op->kp.addr + i); if (p && !kprobe_disabled(p)) return -EEXIST; } return 0; } /* Check the addr is within the optimized instructions. 
*/ int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) { return ((unsigned long)op->kp.addr <= addr && (unsigned long)op->kp.addr + op->optinsn.size > addr); } /* Free optimized instruction slot */ static void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) { if (op->optinsn.insn) { free_optinsn_slot(op->optinsn.insn, dirty); op->optinsn.insn = NULL; op->optinsn.size = 0; } } void arch_remove_optimized_kprobe(struct optimized_kprobe *op) { __arch_remove_optimized_kprobe(op, 1); } /* * Copy replacing target instructions * Target instructions MUST be relocatable (checked inside) * This is called when new aggr(opt)probe is allocated or reused. */ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *__unused) { u8 *buf; int ret; long rel; if (!can_optimize((unsigned long)op->kp.addr)) return -EILSEQ; op->optinsn.insn = get_optinsn_slot(); if (!op->optinsn.insn) return -ENOMEM; /* * Verify if the address gap is in 2GB range, because this uses * a relative jump. 
*/ rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; if (abs(rel) > 0x7fffffff) { __arch_remove_optimized_kprobe(op, 0); return -ERANGE; } buf = (u8 *)op->optinsn.insn; /* Copy instructions into the out-of-line buffer */ ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); if (ret < 0) { __arch_remove_optimized_kprobe(op, 0); return ret; } op->optinsn.size = ret; /* Copy arch-dep-instance from template */ memcpy(buf, &optprobe_template_entry, TMPL_END_IDX); /* Set probe information */ synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); /* Set probe function call */ synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); /* Set returning jmp instruction at the tail of out-of-line buffer */ synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, (u8 *)op->kp.addr + op->optinsn.size); flush_icache_range((unsigned long) buf, (unsigned long) buf + TMPL_END_IDX + op->optinsn.size + RELATIVEJUMP_SIZE); return 0; } /* * Replace breakpoints (int3) with relative jumps. * Caller must call with locking kprobe_mutex and text_mutex. */ void arch_optimize_kprobes(struct list_head *oplist) { struct optimized_kprobe *op, *tmp; u8 insn_buf[RELATIVEJUMP_SIZE]; list_for_each_entry_safe(op, tmp, oplist, list) { s32 rel = (s32)((long)op->optinsn.insn - ((long)op->kp.addr + RELATIVEJUMP_SIZE)); WARN_ON(kprobe_disabled(&op->kp)); /* Backup instructions which will be replaced by jump address */ memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, RELATIVE_ADDR_SIZE); insn_buf[0] = RELATIVEJUMP_OPCODE; *(s32 *)(&insn_buf[1]) = rel; text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, op->optinsn.insn); list_del_init(&op->list); } } /* Replace a relative jump with a breakpoint (int3). 
*/ void arch_unoptimize_kprobe(struct optimized_kprobe *op) { u8 insn_buf[RELATIVEJUMP_SIZE]; /* Set int3 to first byte for kprobes */ insn_buf[0] = BREAKPOINT_INSTRUCTION; memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, op->optinsn.insn); } /* * Recover original instructions and breakpoints from relative jumps. * Caller must call with locking kprobe_mutex. */ extern void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list) { struct optimized_kprobe *op, *tmp; list_for_each_entry_safe(op, tmp, oplist, list) { arch_unoptimize_kprobe(op); list_move(&op->list, done_list); } } int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) { struct optimized_kprobe *op; if (p->flags & KPROBE_FLAG_OPTIMIZED) { /* This kprobe is really able to run optimized path. */ op = container_of(p, struct optimized_kprobe, kp); /* Detour through copied instructions */ regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX; if (!reenter) reset_current_kprobe(); preempt_enable_no_resched(); return 1; } return 0; } NOKPROBE_SYMBOL(setup_detour_execution);
gpl-2.0
nazunamoe/Oxygen_united_kernel-gproj
drivers/ata/sata_inic162x.c
1196
24624
/* * sata_inic162x.c - Driver for Initio 162x SATA controllers * * Copyright 2006 SUSE Linux Products GmbH * Copyright 2006 Tejun Heo <teheo@novell.com> * * This file is released under GPL v2. * * **** WARNING **** * * This driver never worked properly and unfortunately data corruption is * relatively common. There isn't anyone working on the driver and there's * no support from the vendor. Do not use this driver in any production * environment. * * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491 * https://bugzilla.kernel.org/show_bug.cgi?id=60565 * * ***************** * * This controller is eccentric and easily locks up if something isn't * right. Documentation is available at initio's website but it only * documents registers (not programming model). * * This driver has interesting history. The first version was written * from the documentation and a 2.4 IDE driver posted on a Taiwan * company, which didn't use any IDMA features and couldn't handle * LBA48. The resulting driver couldn't handle LBA48 devices either * making it pretty useless. * * After a while, initio picked the driver up, renamed it to * sata_initio162x, updated it to use IDMA for ATA DMA commands and * posted it on their website. It only used ATA_PROT_DMA for IDMA and * attaching both devices and issuing IDMA and !IDMA commands * simultaneously broke it due to PIRQ masking interaction but it did * show how to use the IDMA (ADMA + some initio specific twists) * engine. * * Then, I picked up their changes again and here's the usable driver * which uses IDMA for everything. Everything works now including * LBA48, CD/DVD burning, suspend/resume and hotplug. There are some * issues tho. Result Tf is not resported properly, NCQ isn't * supported yet and CD/DVD writing works with DMA assisted PIO * protocol (which, for native SATA devices, shouldn't cause any * noticeable difference). * * Anyways, so, here's finally a working driver for inic162x. Enjoy! 
* * initio: If you guys wanna improve the driver regarding result TF * access and other stuff, please feel free to contact me. I'll be * happy to assist. */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #include <linux/blkdev.h> #include <scsi/scsi_device.h> #define DRV_NAME "sata_inic162x" #define DRV_VERSION "0.4" enum { MMIO_BAR_PCI = 5, MMIO_BAR_CARDBUS = 1, NR_PORTS = 2, IDMA_CPB_TBL_SIZE = 4 * 32, INIC_DMA_BOUNDARY = 0xffffff, HOST_ACTRL = 0x08, HOST_CTL = 0x7c, HOST_STAT = 0x7e, HOST_IRQ_STAT = 0xbc, HOST_IRQ_MASK = 0xbe, PORT_SIZE = 0x40, /* registers for ATA TF operation */ PORT_TF_DATA = 0x00, PORT_TF_FEATURE = 0x01, PORT_TF_NSECT = 0x02, PORT_TF_LBAL = 0x03, PORT_TF_LBAM = 0x04, PORT_TF_LBAH = 0x05, PORT_TF_DEVICE = 0x06, PORT_TF_COMMAND = 0x07, PORT_TF_ALT_STAT = 0x08, PORT_IRQ_STAT = 0x09, PORT_IRQ_MASK = 0x0a, PORT_PRD_CTL = 0x0b, PORT_PRD_ADDR = 0x0c, PORT_PRD_XFERLEN = 0x10, PORT_CPB_CPBLAR = 0x18, PORT_CPB_PTQFIFO = 0x1c, /* IDMA register */ PORT_IDMA_CTL = 0x14, PORT_IDMA_STAT = 0x16, PORT_RPQ_FIFO = 0x1e, PORT_RPQ_CNT = 0x1f, PORT_SCR = 0x20, /* HOST_CTL bits */ HCTL_LEDEN = (1 << 3), /* enable LED operation */ HCTL_IRQOFF = (1 << 8), /* global IRQ off */ HCTL_FTHD0 = (1 << 10), /* fifo threshold 0 */ HCTL_FTHD1 = (1 << 11), /* fifo threshold 1*/ HCTL_PWRDWN = (1 << 12), /* power down PHYs */ HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */ HCTL_RPGSEL = (1 << 15), /* register page select */ HCTL_KNOWN_BITS = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST | HCTL_RPGSEL, /* HOST_IRQ_(STAT|MASK) bits */ HIRQ_PORT0 = (1 << 0), HIRQ_PORT1 = (1 << 1), HIRQ_SOFT = (1 << 14), HIRQ_GLOBAL = (1 << 15), /* STAT only */ /* PORT_IRQ_(STAT|MASK) bits */ PIRQ_OFFLINE = (1 << 0), /* device unplugged */ PIRQ_ONLINE = (1 << 1), /* device plugged */ PIRQ_COMPLETE = (1 << 2), /* completion interrupt */ PIRQ_FATAL = (1 << 3), /* fatal error */ PIRQ_ATA = (1 
<< 4), /* ATA interrupt */ PIRQ_REPLY = (1 << 5), /* reply FIFO not empty */ PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */ PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL, PIRQ_MASK_DEFAULT = PIRQ_REPLY | PIRQ_ATA, PIRQ_MASK_FREEZE = 0xff, /* PORT_PRD_CTL bits */ PRD_CTL_START = (1 << 0), PRD_CTL_WR = (1 << 3), PRD_CTL_DMAEN = (1 << 7), /* DMA enable */ /* PORT_IDMA_CTL bits */ IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */ IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinary */ IDMA_CTL_GO = (1 << 7), /* IDMA mode go */ IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */ /* PORT_IDMA_STAT bits */ IDMA_STAT_PERR = (1 << 0), /* PCI ERROR MODE */ IDMA_STAT_CPBERR = (1 << 1), /* ADMA CPB error */ IDMA_STAT_LGCY = (1 << 3), /* ADMA legacy */ IDMA_STAT_UIRQ = (1 << 4), /* ADMA unsolicited irq */ IDMA_STAT_STPD = (1 << 5), /* ADMA stopped */ IDMA_STAT_PSD = (1 << 6), /* ADMA pause */ IDMA_STAT_DONE = (1 << 7), /* ADMA done */ IDMA_STAT_ERR = IDMA_STAT_PERR | IDMA_STAT_CPBERR, /* CPB Control Flags*/ CPB_CTL_VALID = (1 << 0), /* CPB valid */ CPB_CTL_QUEUED = (1 << 1), /* queued command */ CPB_CTL_DATA = (1 << 2), /* data, rsvd in datasheet */ CPB_CTL_IEN = (1 << 3), /* PCI interrupt enable */ CPB_CTL_DEVDIR = (1 << 4), /* device direction control */ /* CPB Response Flags */ CPB_RESP_DONE = (1 << 0), /* ATA command complete */ CPB_RESP_REL = (1 << 1), /* ATA release */ CPB_RESP_IGNORED = (1 << 2), /* CPB ignored */ CPB_RESP_ATA_ERR = (1 << 3), /* ATA command error */ CPB_RESP_SPURIOUS = (1 << 4), /* ATA spurious interrupt error */ CPB_RESP_UNDERFLOW = (1 << 5), /* APRD deficiency length error */ CPB_RESP_OVERFLOW = (1 << 6), /* APRD exccess length error */ CPB_RESP_CPB_ERR = (1 << 7), /* CPB error flag */ /* PRD Control Flags */ PRD_DRAIN = (1 << 1), /* ignore data excess */ PRD_CDB = (1 << 2), /* atapi packet command pointer */ PRD_DIRECT_INTR = (1 << 3), /* direct interrupt */ PRD_DMA = (1 << 4), /* data transfer method */ PRD_WRITE = (1 << 5), /* 
data dir, rsvd in datasheet */ PRD_IOM = (1 << 6), /* io/memory transfer */ PRD_END = (1 << 7), /* APRD chain end */ }; /* Comman Parameter Block */ struct inic_cpb { u8 resp_flags; /* Response Flags */ u8 error; /* ATA Error */ u8 status; /* ATA Status */ u8 ctl_flags; /* Control Flags */ __le32 len; /* Total Transfer Length */ __le32 prd; /* First PRD pointer */ u8 rsvd[4]; /* 16 bytes */ u8 feature; /* ATA Feature */ u8 hob_feature; /* ATA Ex. Feature */ u8 device; /* ATA Device/Head */ u8 mirctl; /* Mirror Control */ u8 nsect; /* ATA Sector Count */ u8 hob_nsect; /* ATA Ex. Sector Count */ u8 lbal; /* ATA Sector Number */ u8 hob_lbal; /* ATA Ex. Sector Number */ u8 lbam; /* ATA Cylinder Low */ u8 hob_lbam; /* ATA Ex. Cylinder Low */ u8 lbah; /* ATA Cylinder High */ u8 hob_lbah; /* ATA Ex. Cylinder High */ u8 command; /* ATA Command */ u8 ctl; /* ATA Control */ u8 slave_error; /* Slave ATA Error */ u8 slave_status; /* Slave ATA Status */ /* 32 bytes */ } __packed; /* Physical Region Descriptor */ struct inic_prd { __le32 mad; /* Physical Memory Address */ __le16 len; /* Transfer Length */ u8 rsvd; u8 flags; /* Control Flags */ } __packed; struct inic_pkt { struct inic_cpb cpb; struct inic_prd prd[LIBATA_MAX_PRD + 1]; /* + 1 for cdb */ u8 cdb[ATAPI_CDB_LEN]; } __packed; struct inic_host_priv { void __iomem *mmio_base; u16 cached_hctl; }; struct inic_port_priv { struct inic_pkt *pkt; dma_addr_t pkt_dma; u32 *cpb_tbl; dma_addr_t cpb_tbl_dma; }; static struct scsi_host_template inic_sht = { ATA_BASE_SHT(DRV_NAME), .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? 
*/ .dma_boundary = INIC_DMA_BOUNDARY, }; static const int scr_map[] = { [SCR_STATUS] = 0, [SCR_ERROR] = 1, [SCR_CONTROL] = 2, }; static void __iomem *inic_port_base(struct ata_port *ap) { struct inic_host_priv *hpriv = ap->host->private_data; return hpriv->mmio_base + ap->port_no * PORT_SIZE; } static void inic_reset_port(void __iomem *port_base) { void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; /* stop IDMA engine */ readw(idma_ctl); /* flush */ msleep(1); /* mask IRQ and assert reset */ writew(IDMA_CTL_RST_IDMA, idma_ctl); readw(idma_ctl); /* flush */ msleep(1); /* release reset */ writew(0, idma_ctl); /* clear irq */ writeb(0xff, port_base + PORT_IRQ_STAT); } static int inic_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val) { void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR; void __iomem *addr; if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) return -EINVAL; addr = scr_addr + scr_map[sc_reg] * 4; *val = readl(scr_addr + scr_map[sc_reg] * 4); /* this controller has stuck DIAG.N, ignore it */ if (sc_reg == SCR_ERROR) *val &= ~SERR_PHYRDY_CHG; return 0; } static int inic_scr_write(struct ata_link *link, unsigned sc_reg, u32 val) { void __iomem *scr_addr = inic_port_base(link->ap) + PORT_SCR; if (unlikely(sc_reg >= ARRAY_SIZE(scr_map))) return -EINVAL; writel(val, scr_addr + scr_map[sc_reg] * 4); return 0; } static void inic_stop_idma(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); readb(port_base + PORT_RPQ_FIFO); readb(port_base + PORT_RPQ_CNT); writew(0, port_base + PORT_IDMA_CTL); } static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat) { struct ata_eh_info *ehi = &ap->link.eh_info; struct inic_port_priv *pp = ap->private_data; struct inic_cpb *cpb = &pp->pkt->cpb; bool freeze = false; ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x", irq_stat, idma_stat); inic_stop_idma(ap); if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { ata_ehi_push_desc(ehi, "hotplug"); 
ata_ehi_hotplugged(ehi); freeze = true; } if (idma_stat & IDMA_STAT_PERR) { ata_ehi_push_desc(ehi, "PCI error"); freeze = true; } if (idma_stat & IDMA_STAT_CPBERR) { ata_ehi_push_desc(ehi, "CPB error"); if (cpb->resp_flags & CPB_RESP_IGNORED) { __ata_ehi_push_desc(ehi, " ignored"); ehi->err_mask |= AC_ERR_INVALID; freeze = true; } if (cpb->resp_flags & CPB_RESP_ATA_ERR) ehi->err_mask |= AC_ERR_DEV; if (cpb->resp_flags & CPB_RESP_SPURIOUS) { __ata_ehi_push_desc(ehi, " spurious-intr"); ehi->err_mask |= AC_ERR_HSM; freeze = true; } if (cpb->resp_flags & (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) { __ata_ehi_push_desc(ehi, " data-over/underflow"); ehi->err_mask |= AC_ERR_HSM; freeze = true; } } if (freeze) ata_port_freeze(ap); else ata_port_abort(ap); } static void inic_host_intr(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); u8 irq_stat; u16 idma_stat; /* read and clear IRQ status */ irq_stat = readb(port_base + PORT_IRQ_STAT); writeb(irq_stat, port_base + PORT_IRQ_STAT); idma_stat = readw(port_base + PORT_IDMA_STAT); if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR))) inic_host_err_intr(ap, irq_stat, idma_stat); if (unlikely(!qc)) goto spurious; if (likely(idma_stat & IDMA_STAT_DONE)) { inic_stop_idma(ap); /* Depending on circumstances, device error * isn't reported by IDMA, check it explicitly. */ if (unlikely(readb(port_base + PORT_TF_COMMAND) & (ATA_DF | ATA_ERR))) qc->err_mask |= AC_ERR_DEV; ata_qc_complete(qc); return; } spurious: ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n", qc ? 
qc->tf.command : 0xff, irq_stat, idma_stat); } static irqreturn_t inic_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct inic_host_priv *hpriv = host->private_data; u16 host_irq_stat; int i, handled = 0; host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT); if (unlikely(!(host_irq_stat & HIRQ_GLOBAL))) goto out; spin_lock(&host->lock); for (i = 0; i < NR_PORTS; i++) if (host_irq_stat & (HIRQ_PORT0 << i)) { inic_host_intr(host->ports[i]); handled++; } spin_unlock(&host->lock); out: return IRQ_RETVAL(handled); } static int inic_check_atapi_dma(struct ata_queued_cmd *qc) { /* For some reason ATAPI_PROT_DMA doesn't work for some * commands including writes and other misc ops. Use PIO * protocol instead, which BTW is driven by the DMA engine * anyway, so it shouldn't make much difference for native * SATA devices. */ if (atapi_cmd_type(qc->cdb[0]) == READ) return 0; return 1; } static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) { struct scatterlist *sg; unsigned int si; u8 flags = 0; if (qc->tf.flags & ATA_TFLAG_WRITE) flags |= PRD_WRITE; if (ata_is_dma(qc->tf.protocol)) flags |= PRD_DMA; for_each_sg(qc->sg, sg, qc->n_elem, si) { prd->mad = cpu_to_le32(sg_dma_address(sg)); prd->len = cpu_to_le16(sg_dma_len(sg)); prd->flags = flags; prd++; } WARN_ON(!si); prd[-1].flags |= PRD_END; } static void inic_qc_prep(struct ata_queued_cmd *qc) { struct inic_port_priv *pp = qc->ap->private_data; struct inic_pkt *pkt = pp->pkt; struct inic_cpb *cpb = &pkt->cpb; struct inic_prd *prd = pkt->prd; bool is_atapi = ata_is_atapi(qc->tf.protocol); bool is_data = ata_is_data(qc->tf.protocol); unsigned int cdb_len = 0; VPRINTK("ENTER\n"); if (is_atapi) cdb_len = qc->dev->cdb_len; /* prepare packet, based on initio driver */ memset(pkt, 0, sizeof(struct inic_pkt)); cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN; if (is_atapi || is_data) cpb->ctl_flags |= CPB_CTL_DATA; cpb->len = cpu_to_le32(qc->nbytes + cdb_len); cpb->prd = 
cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd)); cpb->device = qc->tf.device; cpb->feature = qc->tf.feature; cpb->nsect = qc->tf.nsect; cpb->lbal = qc->tf.lbal; cpb->lbam = qc->tf.lbam; cpb->lbah = qc->tf.lbah; if (qc->tf.flags & ATA_TFLAG_LBA48) { cpb->hob_feature = qc->tf.hob_feature; cpb->hob_nsect = qc->tf.hob_nsect; cpb->hob_lbal = qc->tf.hob_lbal; cpb->hob_lbam = qc->tf.hob_lbam; cpb->hob_lbah = qc->tf.hob_lbah; } cpb->command = qc->tf.command; /* don't load ctl - dunno why. it's like that in the initio driver */ /* setup PRD for CDB */ if (is_atapi) { memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN); prd->mad = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, cdb)); prd->len = cpu_to_le16(cdb_len); prd->flags = PRD_CDB | PRD_WRITE; if (!is_data) prd->flags |= PRD_END; prd++; } /* setup sg table */ if (is_data) inic_fill_sg(prd, qc); pp->cpb_tbl[0] = pp->pkt_dma; } static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *port_base = inic_port_base(ap); /* fire up the ADMA engine */ writew(HCTL_FTHD0 | HCTL_LEDEN, port_base + HOST_CTL); writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL); writeb(0, port_base + PORT_CPB_PTQFIFO); return 0; } static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { void __iomem *port_base = inic_port_base(ap); tf->feature = readb(port_base + PORT_TF_FEATURE); tf->nsect = readb(port_base + PORT_TF_NSECT); tf->lbal = readb(port_base + PORT_TF_LBAL); tf->lbam = readb(port_base + PORT_TF_LBAM); tf->lbah = readb(port_base + PORT_TF_LBAH); tf->device = readb(port_base + PORT_TF_DEVICE); tf->command = readb(port_base + PORT_TF_COMMAND); } static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc) { struct ata_taskfile *rtf = &qc->result_tf; struct ata_taskfile tf; /* FIXME: Except for status and error, result TF access * doesn't work. I tried reading from BAR0/2, CPB and BAR5. * None works regardless of which command interface is used. 
* For now return true iff status indicates device error. * This means that we're reporting bogus sector for RW * failures. Eeekk.... */ inic_tf_read(qc->ap, &tf); if (!(tf.command & ATA_ERR)) return false; rtf->command = tf.command; rtf->feature = tf.feature; return true; } static void inic_freeze(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK); writeb(0xff, port_base + PORT_IRQ_STAT); } static void inic_thaw(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); writeb(0xff, port_base + PORT_IRQ_STAT); writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK); } static int inic_check_ready(struct ata_link *link) { void __iomem *port_base = inic_port_base(link->ap); return ata_check_ready(readb(port_base + PORT_TF_COMMAND)); } /* * SRST and SControl hardreset don't give valid signature on this * controller. Only controller specific hardreset mechanism works. */ static int inic_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { struct ata_port *ap = link->ap; void __iomem *port_base = inic_port_base(ap); void __iomem *idma_ctl = port_base + PORT_IDMA_CTL; const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); int rc; /* hammer it into sane state */ inic_reset_port(port_base); writew(IDMA_CTL_RST_ATA, idma_ctl); readw(idma_ctl); /* flush */ ata_msleep(ap, 1); writew(0, idma_ctl); rc = sata_link_resume(link, timing, deadline); if (rc) { ata_link_warn(link, "failed to resume link after reset (errno=%d)\n", rc); return rc; } *class = ATA_DEV_NONE; if (ata_link_online(link)) { struct ata_taskfile tf; /* wait for link to become ready */ rc = ata_wait_after_reset(link, deadline, inic_check_ready); /* link occupied, -ENODEV too is an error */ if (rc) { ata_link_warn(link, "device not ready after hardreset (errno=%d)\n", rc); return rc; } inic_tf_read(ap, &tf); *class = ata_dev_classify(&tf); } return 0; } static void inic_error_handler(struct 
ata_port *ap) { void __iomem *port_base = inic_port_base(ap); inic_reset_port(port_base); ata_std_error_handler(ap); } static void inic_post_internal_cmd(struct ata_queued_cmd *qc) { /* make DMA engine forget about the failed command */ if (qc->flags & ATA_QCFLAG_FAILED) inic_reset_port(inic_port_base(qc->ap)); } static void init_port(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); struct inic_port_priv *pp = ap->private_data; /* clear packet and CPB table */ memset(pp->pkt, 0, sizeof(struct inic_pkt)); memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE); /* setup CPB lookup table addresses */ writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR); } static int inic_port_resume(struct ata_port *ap) { init_port(ap); return 0; } static int inic_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct inic_port_priv *pp; /* alloc and initialize private data */ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; ap->private_data = pp; /* Alloc resources */ pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt), &pp->pkt_dma, GFP_KERNEL); if (!pp->pkt) return -ENOMEM; pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE, &pp->cpb_tbl_dma, GFP_KERNEL); if (!pp->cpb_tbl) return -ENOMEM; init_port(ap); return 0; } static struct ata_port_operations inic_port_ops = { .inherits = &sata_port_ops, .check_atapi_dma = inic_check_atapi_dma, .qc_prep = inic_qc_prep, .qc_issue = inic_qc_issue, .qc_fill_rtf = inic_qc_fill_rtf, .freeze = inic_freeze, .thaw = inic_thaw, .hardreset = inic_hardreset, .error_handler = inic_error_handler, .post_internal_cmd = inic_post_internal_cmd, .scr_read = inic_scr_read, .scr_write = inic_scr_write, .port_resume = inic_port_resume, .port_start = inic_port_start, }; static struct ata_port_info inic_port_info = { .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &inic_port_ops }; static int init_controller(void __iomem 
*mmio_base, u16 hctl) { int i; u16 val; hctl &= ~HCTL_KNOWN_BITS; /* Soft reset whole controller. Spec says reset duration is 3 * PCI clocks, be generous and give it 10ms. */ writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL); readw(mmio_base + HOST_CTL); /* flush */ for (i = 0; i < 10; i++) { msleep(1); val = readw(mmio_base + HOST_CTL); if (!(val & HCTL_SOFTRST)) break; } if (val & HCTL_SOFTRST) return -EIO; /* mask all interrupts and reset ports */ for (i = 0; i < NR_PORTS; i++) { void __iomem *port_base = mmio_base + i * PORT_SIZE; writeb(0xff, port_base + PORT_IRQ_MASK); inic_reset_port(port_base); } /* port IRQ is masked now, unmask global IRQ */ writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL); val = readw(mmio_base + HOST_IRQ_MASK); val &= ~(HIRQ_PORT0 | HIRQ_PORT1); writew(val, mmio_base + HOST_IRQ_MASK); return 0; } #ifdef CONFIG_PM static int inic_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); struct inic_host_priv *hpriv = host->private_data; int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); if (rc) return rc; } ata_host_resume(host); return 0; } #endif static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct ata_port_info *ppi[] = { &inic_port_info, NULL }; struct ata_host *host; struct inic_host_priv *hpriv; void __iomem * const *iomap; int mmio_bar; int i, rc; ata_print_version_once(&pdev->dev, DRV_VERSION); dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n"); /* alloc host */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; host->private_data = hpriv; /* Acquire resources and fill host. 
Note that PCI and cardbus * use different BARs. */ rc = pcim_enable_device(pdev); if (rc) return rc; if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM) mmio_bar = MMIO_BAR_PCI; else mmio_bar = MMIO_BAR_CARDBUS; rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME); if (rc) return rc; host->iomap = iomap = pcim_iomap_table(pdev); hpriv->mmio_base = iomap[mmio_bar]; hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL); for (i = 0; i < NR_PORTS; i++) { struct ata_port *ap = host->ports[i]; ata_port_pbar_desc(ap, mmio_bar, -1, "mmio"); ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port"); } /* Set dma_mask. This devices doesn't support 64bit addressing. */ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } /* * This controller is braindamaged. dma_boundary is 0xffff * like others but it will lock up the whole machine HARD if * 65536 byte PRD entry is fed. Reduce maximum segment size. 
*/ rc = pci_set_dma_max_seg_size(pdev, 65536 - 512); if (rc) { dev_err(&pdev->dev, "failed to set the maximum segment size\n"); return rc; } rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); if (rc) { dev_err(&pdev->dev, "failed to initialize controller\n"); return rc; } pci_set_master(pdev); return ata_host_activate(host, pdev->irq, inic_interrupt, IRQF_SHARED, &inic_sht); } static const struct pci_device_id inic_pci_tbl[] = { { PCI_VDEVICE(INIT, 0x1622), }, { }, }; static struct pci_driver inic_pci_driver = { .name = DRV_NAME, .id_table = inic_pci_tbl, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = inic_pci_device_resume, #endif .probe = inic_init_one, .remove = ata_pci_remove_one, }; static int __init inic_init(void) { return pci_register_driver(&inic_pci_driver); } static void __exit inic_exit(void) { pci_unregister_driver(&inic_pci_driver); } MODULE_AUTHOR("Tejun Heo"); MODULE_DESCRIPTION("low-level driver for Initio 162x SATA"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, inic_pci_tbl); MODULE_VERSION(DRV_VERSION); module_init(inic_init); module_exit(inic_exit);
gpl-2.0
jayk/linux
arch/parisc/kernel/ftrace.c
1452
4373
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 * 	- add CONFIG_DYNAMIC_FTRACE
 *	- add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/sections.h>
#include <asm/ftrace.h>


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Add a function return address to the trace stack on thread info.*/
/* Returns 0 on success, -EBUSY if tracing is off or the stack is full. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/* claim the slot first, then fill it; barrier() keeps the
	 * compiler from reordering the stores past the index bump */
	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}

/* Retrieve a function return address to the trace stack on thread info.*/
/* Pops the top entry into *trace/*ret; on underflow stops the graph
 * tracer and returns the address of panic() so the caller traps loudly. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)
			dereference_function_descriptor(&panic);
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	barrier();
	current->curr_ret_stack--;

}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long retval0,
				       unsigned long retval1)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = local_clock();
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)
			dereference_function_descriptor(&panic);
	}

	/* HACK: we hand over the old functions' return values
	   in %r23 and %r24. Assembly in entry.S will take care
	   and move those to their final registers %ret0 and %ret1 */
	asm( "copy %0, %%r23 \n\t"
	     "copy %1, %%r24 \n"
		: : "r" (retval0), "r" (retval1) );

	return ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	/* redirect the caller's return address to return_to_handler */
	*parent = (unsigned long)
		  dereference_function_descriptor(&return_to_handler);

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = local_clock();

	/* on failure, undo the redirection so the caller returns normally */
	if (push_return_trace(old, calltime,
				self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


/* Trampoline called from entry.S on every traced function entry;
 * dispatches to the function tracer or the graph tracer.
 * NOTE(review): org_sp_gr3 appears to be the caller's stack pointer
 * captured in entry.S (%gr3) — confirm against entry.S. */
void ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3)
{
	extern ftrace_func_t ftrace_trace_function;

	if (ftrace_trace_function != ftrace_stub) {
		ftrace_trace_function(parent, self_addr);
		return;
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_entry && ftrace_graph_return) {
		unsigned long sp;
		unsigned long *parent_rp;

		asm volatile ("copy %%r30, %0" : "=r"(sp));

		/* sanity check: is stack pointer which we got from
		   assembler function in entry.S in a reasonable
		   range compared to current stack pointer? */
		if ((sp - org_sp_gr3) > 0x400)
			return;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
gpl-2.0
sameerkhan07/furnace_kernel_motorola_falcon
drivers/target/tcm_fc/tfc_cmd.c
1964
14172
/* * Copyright (c) 2010 Cisco Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ /* XXX TBD some includes may be extraneous */ #include <linux/module.h> #include <linux/moduleparam.h> #include <generated/utsrelease.h> #include <linux/utsname.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/types.h> #include <linux/string.h> #include <linux/configfs.h> #include <linux/ctype.h> #include <linux/hash.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/libfc.h> #include <scsi/fc_encode.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> #include "tcm_fc.h" /* * Dump cmd state for debugging. 
*/ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) { struct fc_exch *ep; struct fc_seq *sp; struct se_cmd *se_cmd; struct scatterlist *sg; int count; se_cmd = &cmd->se_cmd; pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", caller, cmd, cmd->sess, cmd->seq, se_cmd); pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", caller, cmd, se_cmd->t_data_nents, se_cmd->data_length, se_cmd->se_cmd_flags); for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) pr_debug("%s: cmd %p sg %p page %p " "len 0x%x off 0x%x\n", caller, cmd, sg, sg_page(sg), sg->length, sg->offset); sp = cmd->seq; if (sp) { ep = fc_seq_exch(sp); pr_debug("%s: cmd %p sid %x did %x " "ox_id %x rx_id %x seq_id %x e_stat %x\n", caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, sp->id, ep->esb_stat); } } static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; struct fc_lport *lport; if (!cmd) return; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) lport->tt.seq_release(fr_seq(fp)); fc_frame_free(fp); ft_sess_put(cmd->sess); /* undo get from lookup at recv */ kfree(cmd); } void ft_release_cmd(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); ft_free_cmd(cmd); } int ft_check_stop_free(struct se_cmd *se_cmd) { transport_generic_free_cmd(se_cmd, 0); return 1; } /* * Send response. */ int ft_queue_status(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct fc_frame *fp; struct fcp_resp_with_ext *fcp; struct fc_lport *lport; struct fc_exch *ep; size_t len; if (cmd->aborted) return 0; ft_dump_cmd(cmd, __func__); ep = fc_seq_exch(cmd->seq); lport = ep->lp; len = sizeof(*fcp) + se_cmd->scsi_sense_length; fp = fc_frame_alloc(lport, len); if (!fp) { /* XXX shouldn't just drop it - requeue and retry? 
*/ return 0; } fcp = fc_frame_payload_get(fp, len); memset(fcp, 0, len); fcp->resp.fr_status = se_cmd->scsi_status; len = se_cmd->scsi_sense_length; if (len) { fcp->resp.fr_flags |= FCP_SNS_LEN_VAL; fcp->ext.fr_sns_len = htonl(len); memcpy((fcp + 1), se_cmd->sense_buffer, len); } /* * Test underflow and overflow with one mask. Usually both are off. * Bidirectional commands are not handled yet. */ if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) { if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) fcp->resp.fr_flags |= FCP_RESID_OVER; else fcp->resp.fr_flags |= FCP_RESID_UNDER; fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count); } /* * Send response. */ cmd->seq = lport->tt.seq_start_next(cmd->seq); fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0); lport->tt.seq_send(lport, cmd->seq, fp); lport->tt.exch_done(cmd->seq); return 0; } int ft_write_pending_status(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); return cmd->write_data_len != se_cmd->data_length; } /* * Send TX_RDY (transfer ready). 
*/ int ft_write_pending(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct fc_frame *fp; struct fcp_txrdy *txrdy; struct fc_lport *lport; struct fc_exch *ep; struct fc_frame_header *fh; u32 f_ctl; ft_dump_cmd(cmd, __func__); if (cmd->aborted) return 0; ep = fc_seq_exch(cmd->seq); lport = ep->lp; fp = fc_frame_alloc(lport, sizeof(*txrdy)); if (!fp) return -ENOMEM; /* Signal QUEUE_FULL */ txrdy = fc_frame_payload_get(fp, sizeof(*txrdy)); memset(txrdy, 0, sizeof(*txrdy)); txrdy->ft_burst_len = htonl(se_cmd->data_length); cmd->seq = lport->tt.seq_start_next(cmd->seq); fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP, FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); fh = fc_frame_header_get(fp); f_ctl = ntoh24(fh->fh_f_ctl); /* Only if it is 'Exchange Responder' */ if (f_ctl & FC_FC_EX_CTX) { /* Target is 'exchange responder' and sending XFER_READY * to 'exchange initiator (initiator)' */ if ((ep->xid <= lport->lro_xid) && (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { /* * cmd may have been broken up into multiple * tasks. Link their sgs together so we can * operate on them all at once. */ transport_do_task_sg_chain(se_cmd); cmd->sg = se_cmd->t_tasks_sg_chained; cmd->sg_cnt = se_cmd->t_tasks_sg_chained_no; } if (cmd->sg && lport->tt.ddp_target(lport, ep->xid, cmd->sg, cmd->sg_cnt)) cmd->was_ddp_setup = 1; } } lport->tt.seq_send(lport, cmd->seq, fp); return 0; } u32 ft_get_task_tag(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); if (cmd->aborted) return ~0; return fc_seq_exch(cmd->seq)->rxid; } int ft_get_cmd_state(struct se_cmd *se_cmd) { return 0; } /* * FC sequence response handler for follow-on sequences (data) and aborts. 
*/ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) { struct ft_cmd *cmd = arg; struct fc_frame_header *fh; if (unlikely(IS_ERR(fp))) { /* XXX need to find cmd if queued */ cmd->seq = NULL; cmd->aborted = true; return; } fh = fc_frame_header_get(fp); switch (fh->fh_r_ctl) { case FC_RCTL_DD_SOL_DATA: /* write data */ ft_recv_write_data(cmd, fp); break; case FC_RCTL_DD_UNSOL_CTL: /* command */ case FC_RCTL_DD_SOL_CTL: /* transfer ready */ case FC_RCTL_DD_DATA_DESC: /* transfer ready */ default: pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); ft_invl_hw_context(cmd); fc_frame_free(fp); transport_generic_free_cmd(&cmd->se_cmd, 0); break; } } /* * Send a FCP response including SCSI status and optional FCP rsp_code. * status is SAM_STAT_GOOD (zero) iff code is valid. * This is used in error cases, such as allocation failures. */ static void ft_send_resp_status(struct fc_lport *lport, const struct fc_frame *rx_fp, u32 status, enum fcp_resp_rsp_codes code) { struct fc_frame *fp; struct fc_seq *sp; const struct fc_frame_header *fh; size_t len; struct fcp_resp_with_ext *fcp; struct fcp_resp_rsp_info *info; fh = fc_frame_header_get(rx_fp); pr_debug("FCP error response: did %x oxid %x status %x code %x\n", ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); len = sizeof(*fcp); if (status == SAM_STAT_GOOD) len += sizeof(*info); fp = fc_frame_alloc(lport, len); if (!fp) return; fcp = fc_frame_payload_get(fp, len); memset(fcp, 0, len); fcp->resp.fr_status = status; if (status == SAM_STAT_GOOD) { fcp->ext.fr_rsp_len = htonl(sizeof(*info)); fcp->resp.fr_flags |= FCP_RSP_LEN_VAL; info = (struct fcp_resp_rsp_info *)(fcp + 1); info->rsp_code = code; } fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); sp = fr_seq(fp); if (sp) { lport->tt.seq_send(lport, sp, fp); lport->tt.exch_done(sp); } else { lport->tt.frame_send(lport, fp); } } /* * Send error or task management response. 
*/ static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code) { ft_send_resp_status(cmd->sess->tport->lport, cmd->req_frame, SAM_STAT_GOOD, code); } /* * Send error or task management response. * Always frees the cmd and associated state. */ static void ft_send_resp_code_and_free(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code) { ft_send_resp_code(cmd, code); ft_free_cmd(cmd); } /* * Handle Task Management Request. */ static void ft_send_tm(struct ft_cmd *cmd) { struct fcp_cmnd *fcp; int rc; u8 tm_func; fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); switch (fcp->fc_tm_flags) { case FCP_TMF_LUN_RESET: tm_func = TMR_LUN_RESET; break; case FCP_TMF_TGT_RESET: tm_func = TMR_TARGET_WARM_RESET; break; case FCP_TMF_CLR_TASK_SET: tm_func = TMR_CLEAR_TASK_SET; break; case FCP_TMF_ABT_TASK_SET: tm_func = TMR_ABORT_TASK_SET; break; case FCP_TMF_CLR_ACA: tm_func = TMR_CLEAR_ACA; break; default: /* * FCP4r01 indicates having a combination of * tm_flags set is invalid. */ pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); return; } /* FIXME: Add referenced task tag for ABORT_TASK */ rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess, &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), cmd, tm_func, GFP_KERNEL, 0, 0); if (rc < 0) ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); } /* * Send status from completed task management request. 
*/ int ft_queue_tm_resp(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); struct se_tmr_req *tmr = se_cmd->se_tmr_req; enum fcp_resp_rsp_codes code; if (cmd->aborted) return 0; switch (tmr->response) { case TMR_FUNCTION_COMPLETE: code = FCP_TMF_CMPL; break; case TMR_LUN_DOES_NOT_EXIST: code = FCP_TMF_INVALID_LUN; break; case TMR_FUNCTION_REJECTED: code = FCP_TMF_REJECTED; break; case TMR_TASK_DOES_NOT_EXIST: case TMR_TASK_STILL_ALLEGIANT: case TMR_TASK_FAILOVER_NOT_SUPPORTED: case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: case TMR_FUNCTION_AUTHORIZATION_FAILED: default: code = FCP_TMF_FAILED; break; } pr_debug("tmr fn %d resp %d fcp code %d\n", tmr->function, tmr->response, code); ft_send_resp_code(cmd, code); return 0; } static void ft_send_work(struct work_struct *work); /* * Handle incoming FCP command. */ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) { struct ft_cmd *cmd; struct fc_lport *lport = sess->tport->lport; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) goto busy; cmd->sess = sess; cmd->seq = lport->tt.seq_assign(lport, fp); if (!cmd->seq) { kfree(cmd); goto busy; } cmd->req_frame = fp; /* hold frame during cmd */ INIT_WORK(&cmd->work, ft_send_work); queue_work(sess->tport->tpg->workqueue, &cmd->work); return; busy: pr_debug("cmd or seq allocation failure - sending BUSY\n"); ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ } /* * Handle incoming FCP frame. * Caller has verified that the frame is type FCP. 
*/ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); switch (fh->fh_r_ctl) { case FC_RCTL_DD_UNSOL_CMD: /* command */ ft_recv_cmd(sess, fp); break; case FC_RCTL_DD_SOL_DATA: /* write data */ case FC_RCTL_DD_UNSOL_CTL: case FC_RCTL_DD_SOL_CTL: case FC_RCTL_DD_DATA_DESC: /* transfer ready */ case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ default: pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ break; } } /* * Send new command to target. */ static void ft_send_work(struct work_struct *work) { struct ft_cmd *cmd = container_of(work, struct ft_cmd, work); struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame); struct fcp_cmnd *fcp; int data_dir = 0; int task_attr; fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp)); if (!fcp) goto err; if (fcp->fc_flags & FCP_CFL_LEN_MASK) goto err; /* not handling longer CDBs yet */ /* * Check for FCP task management flags */ if (fcp->fc_tm_flags) { ft_send_tm(cmd); return; } switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) { case 0: data_dir = DMA_NONE; break; case FCP_CFL_RDDATA: data_dir = DMA_FROM_DEVICE; break; case FCP_CFL_WRDATA: data_dir = DMA_TO_DEVICE; break; case FCP_CFL_WRDATA | FCP_CFL_RDDATA: goto err; /* TBD not supported by tcm_fc yet */ } /* * Locate the SAM Task Attr from fc_pri_ta */ switch (fcp->fc_pri_ta & FCP_PTA_MASK) { case FCP_PTA_HEADQ: task_attr = MSG_HEAD_TAG; break; case FCP_PTA_ORDERED: task_attr = MSG_ORDERED_TAG; break; case FCP_PTA_ACA: task_attr = MSG_ACA_TAG; break; case FCP_PTA_SIMPLE: /* Fallthrough */ default: task_attr = MSG_SIMPLE_TAG; } fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); /* * Use a single se_cmd->cmd_kref as we expect to release se_cmd * directly from ft_check_stop_free callback in response path. 
*/ target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl), task_attr, data_dir, 0); pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); return; err: ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID); }
gpl-2.0
keks2293/kernel_zte
drivers/input/gameport/fm801-gp.c
2476
3980
/*
 * FM801 gameport driver for Linux
 *
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/gameport.h>

#define PCI_VENDOR_ID_FORTEMEDIA	0x1319
#define PCI_DEVICE_ID_FM801_GP	0x0802

#define HAVE_COOKED

/* Per-device state: the registered gameport and its I/O region. */
struct fm801_gp {
	struct gameport *gameport;
	struct resource *res_port;
};

#ifdef HAVE_COOKED
/*
 * Read pre-decoded ("cooked") axis and button state from the FM801
 * gameport registers.  Each 16-bit word holds a 13-bit axis value
 * (scaled by << 5) and two active-low button bits in bits 14-15;
 * 0xffff means the axis is absent (-1).  Always returns 0.
 */
static int fm801_gp_cooked_read(struct gameport *gameport, int *axes,
				int *buttons)
{
	unsigned short w;

	w = inw(gameport->io + 2);
	*buttons = (~w >> 14) & 0x03;
	axes[0] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	w = inw(gameport->io + 4);
	axes[1] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	w = inw(gameport->io + 6);
	*buttons |= ((~w >> 14) & 0x03) << 2;
	axes[2] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	w = inw(gameport->io + 8);
	axes[3] = (w == 0xffff) ? -1 : ((w & 0x1fff) << 5);
	outw(0xff, gameport->io); /* reset */

	return 0;
}
#endif

/* Accept cooked (when compiled in) and raw modes; reject anything else. */
static int fm801_gp_open(struct gameport *gameport, int mode)
{
	switch (mode) {
#ifdef HAVE_COOKED
	case GAMEPORT_MODE_COOKED:
		return 0;
#endif
	case GAMEPORT_MODE_RAW:
		return 0;
	default:
		return -1;
	}
	return 0;
}

/* PCI probe: allocate state, claim the I/O region, enable the
 * joystick ports and register the gameport. */
static int fm801_gp_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct fm801_gp *gp;
	struct gameport *port;
	int error;

	gp = kzalloc(sizeof(struct fm801_gp), GFP_KERNEL);
	port = gameport_allocate_port();
	if (!gp || !port) {
		printk(KERN_ERR "fm801-gp: Memory allocation failed\n");
		error = -ENOMEM;
		goto err_out_free;
	}

	error = pci_enable_device(pci);
	if (error)
		goto err_out_free;

	port->open = fm801_gp_open;
#ifdef HAVE_COOKED
	port->cooked_read = fm801_gp_cooked_read;
#endif
	gameport_set_name(port, "FM801");
	gameport_set_phys(port, "pci%s/gameport0", pci_name(pci));
	port->dev.parent = &pci->dev;
	port->io = pci_resource_start(pci, 0);

	gp->gameport = port;
	gp->res_port = request_region(port->io, 0x10, "FM801 GP");
	if (!gp->res_port) {
		printk(KERN_DEBUG "fm801-gp: unable to grab region 0x%x-0x%x\n",
			port->io, port->io + 0x0f);
		error = -EBUSY;
		goto err_out_disable_dev;
	}

	pci_set_drvdata(pci, gp);

	outb(0x60, port->io + 0x0d); /* enable joystick 1 and 2 */

	gameport_register_port(port);

	return 0;

 err_out_disable_dev:
	pci_disable_device(pci);
 err_out_free:
	/* gameport_free_port()/kfree() tolerate the partial-alloc case */
	gameport_free_port(port);
	kfree(gp);
	return error;
}

/* PCI remove: tear down in reverse order of probe. */
static void fm801_gp_remove(struct pci_dev *pci)
{
	struct fm801_gp *gp = pci_get_drvdata(pci);

	gameport_unregister_port(gp->gameport);
	release_resource(gp->res_port);
	kfree(gp);

	pci_disable_device(pci);
}

static const struct pci_device_id fm801_gp_id_table[] = {
	{ PCI_VENDOR_ID_FORTEMEDIA, PCI_DEVICE_ID_FM801_GP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0  },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, fm801_gp_id_table);

static struct pci_driver fm801_gp_driver = {
	.name = "FM801_gameport",
	.id_table = fm801_gp_id_table,
	.probe = fm801_gp_probe,
	.remove = fm801_gp_remove,
};

module_pci_driver(fm801_gp_driver);

MODULE_DESCRIPTION("FM801 gameport driver");
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_LICENSE("GPL");
gpl-2.0
kunato/s3-u6
arch/sh/kernel/cpu/shmobile/cpuidle.c
2732
3228
/*
 * arch/sh/kernel/cpu/shmobile/cpuidle.c
 *
 * Cpuidle support code for SuperH Mobile
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include <asm/suspend.h>
#include <asm/uaccess.h>
#include <asm/hwblk.h>

/*
 * Hardware sleep modes in order of increasing depth; the array index is
 * also the cpuidle state index used below.
 */
static unsigned long cpuidle_mode[] = {
	SUSP_SH_SLEEP,			/* regular sleep mode */
	SUSP_SH_SLEEP | SUSP_SH_SF,	/* sleep mode + self refresh */
	SUSP_SH_STANDBY | SUSP_SH_SF,	/* software standby mode + self refresh */
};

/*
 * cpuidle_sleep_enter - ->enter callback shared by all states.
 *
 * Picks the deepest mode permitted by BOTH the hardware clock/hwblk
 * dependencies (arch_hwblk_sleep_mode()) and the governor's request
 * (derived from the state pointer's offset into dev->states), enters it
 * via sh_mobile_call_standby(), and returns the measured residency.
 * The ">> 10" converts the ktime delta from ns to approximate us
 * (divide by 1024 rather than 1000).
 */
static int cpuidle_sleep_enter(struct cpuidle_device *dev,
			       struct cpuidle_state *state)
{
	unsigned long allowed_mode = arch_hwblk_sleep_mode();
	ktime_t before, after;
	int requested_state = state - &dev->states[0];
	int allowed_state;
	int k;

	/* convert allowed mode to allowed state */
	for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
		if (cpuidle_mode[k] == allowed_mode)
			break;

	allowed_state = k;

	/* take the following into account for sleep mode selection:
	 * - allowed_state: best mode allowed by hardware (clock deps)
	 * - requested_state: best mode allowed by software (latencies)
	 */
	k = min_t(int, allowed_state, requested_state);

	/* record which state was actually entered for the governor */
	dev->last_state = &dev->states[k];
	before = ktime_get();
	sh_mobile_call_standby(cpuidle_mode[k]);
	after = ktime_get();
	return ktime_to_ns(ktime_sub(after, before)) >> 10;
}

static struct cpuidle_device cpuidle_dev;
static struct cpuidle_driver cpuidle_driver = {
	.name =		"sh_idle",
	.owner =	THIS_MODULE,
};

/*
 * sh_mobile_setup_cpuidle - register the driver and populate the state
 * table.
 *
 * C1 (plain sleep) is always present and is the safe fallback state;
 * C2 (sleep + self-refresh) and C3 (standby + self-refresh) are added
 * only when sh_mobile_sleep_supported advertises the corresponding
 * capability bits.  Latency/residency/power numbers are fixed
 * platform estimates.
 */
void sh_mobile_setup_cpuidle(void)
{
	struct cpuidle_device *dev = &cpuidle_dev;
	struct cpuidle_state *state;
	int i;

	cpuidle_register_driver(&cpuidle_driver);

	/* start from a clean slate: blank out every state slot */
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	i = CPUIDLE_DRIVER_STATE_START;

	/* C1: regular sleep, always available */
	state = &dev->states[i++];
	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
	strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
	state->exit_latency = 1;
	state->target_residency = 1 * 2;
	state->power_usage = 3;
	state->flags = 0;
	state->flags |= CPUIDLE_FLAG_TIME_VALID;
	state->enter = cpuidle_sleep_enter;

	dev->safe_state = state;

	/* C2: sleep with SDRAM self-refresh, if supported */
	if (sh_mobile_sleep_supported & SUSP_SH_SF) {
		state = &dev->states[i++];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
		strncpy(state->desc, "SuperH Sleep Mode [SF]",
			CPUIDLE_DESC_LEN);
		state->exit_latency = 100;
		state->target_residency = 1 * 2;
		state->power_usage = 1;
		state->flags = 0;
		state->flags |= CPUIDLE_FLAG_TIME_VALID;
		state->enter = cpuidle_sleep_enter;
	}

	/* C3: software standby with self-refresh, if supported */
	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
		state = &dev->states[i++];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
		strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
			CPUIDLE_DESC_LEN);
		state->exit_latency = 2300;
		state->target_residency = 1 * 2;
		state->power_usage = 1;
		state->flags = 0;
		state->flags |= CPUIDLE_FLAG_TIME_VALID;
		state->enter = cpuidle_sleep_enter;
	}

	dev->state_count = i;

	cpuidle_register_device(dev);
}
gpl-2.0
mythos234/zerolte-kernel-CM
drivers/staging/speakup/speakup_apollo.c
2988
6850
/*
 * originally written by: Kirk Reiser <kirk@braille.uwo.ca>
 * this version considerably modified by David Borowski, david575@rogers.com
 *
 * Copyright (C) 1998-99  Kirk Reiser.
 * Copyright (C) 2003 David Borowski.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * this code is specificly written as a driver for the speakup screenreview
 * package and is not a general device driver.
 */
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>

#include "spk_priv.h"
#include "serialio.h"
#include "speakup.h"

#define DRV_VERSION "2.21"
#define SYNTH_CLEAR 0x18	/* control byte that flushes the synth */
#define PROCSPEECH '\r'		/* tells the synth to speak buffered text */

static void do_catch_up(struct spk_synth *synth);

/*
 * Tunable synthesizer variables: each entry maps a speakup variable to
 * the Apollo serial command template ("@W%d" etc.), its default value
 * and its allowed range.
 */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"cap, " } },
	{ CAPS_STOP, .u.s = {"" } },
	{ RATE, .u.n = {"@W%d", 6, 1, 9, 0, 0, NULL } },
	{ PITCH, .u.n = {"@F%x", 10, 0, 15, 0, 0, NULL } },
	{ VOL, .u.n = {"@A%x", 10, 0, 15, 0, 0, NULL } },
	{ VOICE, .u.n = {"@V%d", 1, 1, 6, 0, 0, NULL } },
	{ LANG, .u.n = {"@=%d,", 1, 1, 4, 0, 0, NULL } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};

/*
 * These attributes will appear in /sys/accessibility/speakup/apollo.
 */
static struct kobj_attribute caps_start_attribute =
	__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
	__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute lang_attribute =
	__ATTR(lang, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
	__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
	__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
	__ATTR(voice, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
	__ATTR(vol, USER_RW, spk_var_show, spk_var_store);

/* timing knobs are root-writable only */
static struct kobj_attribute delay_time_attribute =
	__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
	__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
	__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
	__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
	__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);

/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&lang_attribute.attr,
	&pitch_attribute.attr,
	&rate_attribute.attr,
	&voice_attribute.attr,
	&vol_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

/*
 * Driver descriptor handed to the speakup core: serial probe/release,
 * timing defaults, the vars table above and the catch-up worker below.
 */
static struct spk_synth synth_apollo = {
	.name = "apollo",
	.version = DRV_VERSION,
	.long_name = "Apollo",
	.init = "@R3@D0@K1\r",	/* init string sent to the synth on probe */
	.procspeech = PROCSPEECH,
	.clear = SYNTH_CLEAR,
	.delay = 500,
	.trigger = 50,
	.jiffies = 50,
	.full = 40000,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.probe = spk_serial_synth_probe,
	.release = spk_serial_release,
	.synth_immediate = spk_synth_immediate,
	.catch_up = do_catch_up,
	.flush = spk_synth_flush,
	.is_alive = spk_synth_is_alive_restart,
	.synth_adjust = NULL,
	.read_buff_add = NULL,
	.get_index = NULL,
	.indexing = {
		.command = NULL,
		.lowindex = 0,
		.highindex = 0,
		.currindex = 0,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "apollo",
	},
};

/*
 * do_catch_up - kthread worker that drains the speakup output buffer to
 * the synthesizer, one byte at a time.
 *
 * Runs until the buffer is empty or the thread is asked to stop.  A
 * pending flush request takes priority over queued text.  When the
 * serial port refuses a byte (spk_serial_out() fails) the DTR/RTS
 * modem-control lines are toggled to nudge the synth's flow control,
 * then the worker sleeps for full_time before retrying the same byte.
 * At a space character after jiffy_delta jiffies have elapsed, a
 * PROCSPEECH is sent so the synth starts speaking what it has so far.
 * All reads of the tunable values happen under speakup_info's lock
 * (spk_lock/spk_unlock).
 */
static void do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	unsigned long flags;
	unsigned long jiff_max;
	struct var_t *jiffy_delta;
	struct var_t *delay_time;
	struct var_t *full_time;
	int full_time_val = 0;
	int delay_time_val = 0;
	int jiffy_delta_val = 0;

	jiffy_delta = spk_get_var(JIFFY);
	delay_time = spk_get_var(DELAY);
	full_time = spk_get_var(FULL);
	spk_lock(flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spk_unlock(flags);

	jiff_max = jiffies + jiffy_delta_val;
	while (!kthread_should_stop()) {
		/* re-read the tunables each iteration; sysfs may change them */
		spk_lock(flags);
		jiffy_delta_val = jiffy_delta->u.n.value;
		full_time_val = full_time->u.n.value;
		delay_time_val = delay_time->u.n.value;
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spk_unlock(flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spk_unlock(flags);
			break;
		}
		/* peek (don't consume) so a failed write can be retried */
		ch = synth_buffer_peek();
		set_current_state(TASK_INTERRUPTIBLE);
		full_time_val = full_time->u.n.value;
		spk_unlock(flags);
		if (!spk_serial_out(ch)) {
			/* port busy: pulse DTR/RTS, back off, retry later */
			outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR);
			outb(UART_MCR_DTR | UART_MCR_RTS,
					speakup_info.port_tts + UART_MCR);
			schedule_timeout(msecs_to_jiffies(full_time_val));
			continue;
		}
		if ((jiffies >= jiff_max) && (ch == SPACE)) {
			/* word boundary after the time budget: speak now */
			spk_lock(flags);
			jiffy_delta_val = jiffy_delta->u.n.value;
			full_time_val = full_time->u.n.value;
			delay_time_val = delay_time->u.n.value;
			spk_unlock(flags);
			if (spk_serial_out(synth->procspeech))
				schedule_timeout(msecs_to_jiffies
						 (delay_time_val));
			else
				schedule_timeout(msecs_to_jiffies
						 (full_time_val));
			jiff_max = jiffies + jiffy_delta_val;
		}
		set_current_state(TASK_RUNNING);
		spk_lock(flags);
		synth_buffer_getc();	/* byte sent OK: consume it */
		spk_unlock(flags);
	}
	/* make the synth speak whatever remains in its own buffer */
	spk_serial_out(PROCSPEECH);
}

module_param_named(ser, synth_apollo.ser, int, S_IRUGO);
module_param_named(start, synth_apollo.startup, short, S_IRUGO);

MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");

static int __init apollo_init(void)
{
	return synth_add(&synth_apollo);
}

static void __exit apollo_exit(void)
{
	synth_remove(&synth_apollo);
}

module_init(apollo_init);
module_exit(apollo_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Apollo II synthesizer");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
gpl-2.0
lawnn/android_kernel_sony_msm8974
drivers/net/wireless/zd1201.c
3756
46765
/* * Driver for ZyDAS zd1201 based wireless USB devices. * * Copyright (c) 2004, 2005 Jeroen Vreeken (pe1rxq@amsat.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * Parts of this driver have been derived from a wlan-ng version * modified by ZyDAS. They also made documentation available, thanks! * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. */ #include <linux/module.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <net/iw_handler.h> #include <linux/string.h> #include <linux/if_arp.h> #include <linux/firmware.h> #include "zd1201.h" static struct usb_device_id zd1201_table[] = { {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */ {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */ {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */ {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */ {} }; static int ap; /* Are we an AP or a normal station? 
*/ #define ZD1201_VERSION "0.15" MODULE_AUTHOR("Jeroen Vreeken <pe1rxq@amsat.org>"); MODULE_DESCRIPTION("Driver for ZyDAS ZD1201 based USB Wireless adapters"); MODULE_VERSION(ZD1201_VERSION); MODULE_LICENSE("GPL"); module_param(ap, int, 0); MODULE_PARM_DESC(ap, "If non-zero Access Point firmware will be loaded"); MODULE_DEVICE_TABLE(usb, zd1201_table); static int zd1201_fw_upload(struct usb_device *dev, int apfw) { const struct firmware *fw_entry; const char *data; unsigned long len; int err; unsigned char ret; char *buf; char *fwfile; if (apfw) fwfile = "zd1201-ap.fw"; else fwfile = "zd1201.fw"; err = request_firmware(&fw_entry, fwfile, &dev->dev); if (err) { dev_err(&dev->dev, "Failed to load %s firmware file!\n", fwfile); dev_err(&dev->dev, "Make sure the hotplug firmware loader is installed.\n"); dev_err(&dev->dev, "Goto http://linux-lc100020.sourceforge.net for more info.\n"); return err; } data = fw_entry->data; len = fw_entry->size; buf = kmalloc(1024, GFP_ATOMIC); if (!buf) goto exit; while (len > 0) { int translen = (len > 1024) ? 
1024 : len; memcpy(buf, data, translen); err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0, USB_DIR_OUT | 0x40, 0, 0, buf, translen, ZD1201_FW_TIMEOUT); if (err < 0) goto exit; len -= translen; data += translen; } err = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x2, USB_DIR_OUT | 0x40, 0, 0, NULL, 0, ZD1201_FW_TIMEOUT); if (err < 0) goto exit; err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); if (err < 0) goto exit; if (ret & 0x80) { err = -EIO; goto exit; } err = 0; exit: kfree(buf); release_firmware(fw_entry); return err; } MODULE_FIRMWARE("zd1201-ap.fw"); MODULE_FIRMWARE("zd1201.fw"); static void zd1201_usbfree(struct urb *urb) { struct zd1201 *zd = urb->context; switch(urb->status) { case -EILSEQ: case -ENODEV: case -ETIME: case -ENOENT: case -EPIPE: case -EOVERFLOW: case -ESHUTDOWN: dev_warn(&zd->usb->dev, "%s: urb failed: %d\n", zd->dev->name, urb->status); } kfree(urb->transfer_buffer); usb_free_urb(urb); } /* cmdreq message: u32 type u16 cmd u16 parm0 u16 parm1 u16 parm2 u8 pad[4] total: 4 + 2 + 2 + 2 + 2 + 4 = 16 */ static int zd1201_docmd(struct zd1201 *zd, int cmd, int parm0, int parm1, int parm2) { unsigned char *command; int ret; struct urb *urb; command = kmalloc(16, GFP_ATOMIC); if (!command) return -ENOMEM; *((__le32*)command) = cpu_to_le32(ZD1201_USB_CMDREQ); *((__le16*)&command[4]) = cpu_to_le16(cmd); *((__le16*)&command[6]) = cpu_to_le16(parm0); *((__le16*)&command[8]) = cpu_to_le16(parm1); *((__le16*)&command[10])= cpu_to_le16(parm2); urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree(command); return -ENOMEM; } usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2), command, 16, zd1201_usbfree, zd); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { kfree(command); usb_free_urb(urb); } return ret; } /* Callback after sending out a packet */ static void zd1201_usbtx(struct urb *urb) { struct zd1201 *zd = urb->context; netif_wake_queue(zd->dev); } /* 
Incoming data */ static void zd1201_usbrx(struct urb *urb) { struct zd1201 *zd = urb->context; int free = 0; unsigned char *data = urb->transfer_buffer; struct sk_buff *skb; unsigned char type; if (!zd) return; switch(urb->status) { case -EILSEQ: case -ENODEV: case -ETIME: case -ENOENT: case -EPIPE: case -EOVERFLOW: case -ESHUTDOWN: dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n", zd->dev->name, urb->status); free = 1; goto exit; } if (urb->status != 0 || urb->actual_length == 0) goto resubmit; type = data[0]; if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) { memcpy(zd->rxdata, data, urb->actual_length); zd->rxlen = urb->actual_length; zd->rxdatas = 1; wake_up(&zd->rxdataq); } /* Info frame */ if (type == ZD1201_PACKET_INQUIRE) { int i = 0; unsigned short infotype, framelen, copylen; framelen = le16_to_cpu(*(__le16*)&data[4]); infotype = le16_to_cpu(*(__le16*)&data[6]); if (infotype == ZD1201_INF_LINKSTATUS) { short linkstatus; linkstatus = le16_to_cpu(*(__le16*)&data[8]); switch(linkstatus) { case 1: netif_carrier_on(zd->dev); break; case 2: netif_carrier_off(zd->dev); break; case 3: netif_carrier_off(zd->dev); break; case 4: netif_carrier_on(zd->dev); break; default: netif_carrier_off(zd->dev); } goto resubmit; } if (infotype == ZD1201_INF_ASSOCSTATUS) { short status = le16_to_cpu(*(__le16*)(data+8)); int event; union iwreq_data wrqu; switch (status) { case ZD1201_ASSOCSTATUS_STAASSOC: case ZD1201_ASSOCSTATUS_REASSOC: event = IWEVREGISTERED; break; case ZD1201_ASSOCSTATUS_DISASSOC: case ZD1201_ASSOCSTATUS_ASSOCFAIL: case ZD1201_ASSOCSTATUS_AUTHFAIL: default: event = IWEVEXPIRED; } memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* Send event to user space */ wireless_send_event(zd->dev, event, &wrqu, NULL); goto resubmit; } if (infotype == ZD1201_INF_AUTHREQ) { union iwreq_data wrqu; memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* There isn't a event that trully fits 
this request. We assume that userspace will be smart enough to see a new station being expired and sends back a authstation ioctl to authorize it. */ wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL); goto resubmit; } /* Other infotypes are handled outside this handler */ zd->rxlen = 0; while (i < urb->actual_length) { copylen = le16_to_cpu(*(__le16*)&data[i+2]); /* Sanity check, sometimes we get junk */ if (copylen+zd->rxlen > sizeof(zd->rxdata)) break; memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen); zd->rxlen += copylen; i += 64; } if (i >= urb->actual_length) { zd->rxdatas = 1; wake_up(&zd->rxdataq); } goto resubmit; } /* Actual data */ if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) { int datalen = urb->actual_length-1; unsigned short len, fc, seq; struct hlist_node *node; len = ntohs(*(__be16 *)&data[datalen-2]); if (len>datalen) len=datalen; fc = le16_to_cpu(*(__le16 *)&data[datalen-16]); seq = le16_to_cpu(*(__le16 *)&data[datalen-24]); if (zd->monitor) { if (datalen < 24) goto resubmit; if (!(skb = dev_alloc_skb(datalen+24))) goto resubmit; memcpy(skb_put(skb, 2), &data[datalen-16], 2); memcpy(skb_put(skb, 2), &data[datalen-2], 2); memcpy(skb_put(skb, 6), &data[datalen-14], 6); memcpy(skb_put(skb, 6), &data[datalen-22], 6); memcpy(skb_put(skb, 6), &data[datalen-8], 6); memcpy(skb_put(skb, 2), &data[datalen-24], 2); memcpy(skb_put(skb, len), data, len); skb->protocol = eth_type_trans(skb, zd->dev); zd->dev->stats.rx_packets++; zd->dev->stats.rx_bytes += skb->len; netif_rx(skb); goto resubmit; } if ((seq & IEEE80211_SCTL_FRAG) || (fc & IEEE80211_FCTL_MOREFRAGS)) { struct zd1201_frag *frag = NULL; char *ptr; if (datalen<14) goto resubmit; if ((seq & IEEE80211_SCTL_FRAG) == 0) { frag = kmalloc(sizeof(*frag), GFP_ATOMIC); if (!frag) goto resubmit; skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2); if (!skb) { kfree(frag); goto resubmit; } frag->skb = skb; frag->seq = seq & IEEE80211_SCTL_SEQ; skb_reserve(skb, 2); memcpy(skb_put(skb, 12), 
&data[datalen-14], 12); memcpy(skb_put(skb, 2), &data[6], 2); memcpy(skb_put(skb, len), data+8, len); hlist_add_head(&frag->fnode, &zd->fraglist); goto resubmit; } hlist_for_each_entry(frag, node, &zd->fraglist, fnode) if (frag->seq == (seq&IEEE80211_SCTL_SEQ)) break; if (!frag) goto resubmit; skb = frag->skb; ptr = skb_put(skb, len); if (ptr) memcpy(ptr, data+8, len); if (fc & IEEE80211_FCTL_MOREFRAGS) goto resubmit; hlist_del_init(&frag->fnode); kfree(frag); } else { if (datalen<14) goto resubmit; skb = dev_alloc_skb(len + 14 + 2); if (!skb) goto resubmit; skb_reserve(skb, 2); memcpy(skb_put(skb, 12), &data[datalen-14], 12); memcpy(skb_put(skb, 2), &data[6], 2); memcpy(skb_put(skb, len), data+8, len); } skb->protocol = eth_type_trans(skb, zd->dev); zd->dev->stats.rx_packets++; zd->dev->stats.rx_bytes += skb->len; netif_rx(skb); } resubmit: memset(data, 0, ZD1201_RXSIZE); urb->status = 0; urb->dev = zd->usb; if(usb_submit_urb(urb, GFP_ATOMIC)) free = 1; exit: if (free) { zd->rxlen = 0; zd->rxdatas = 1; wake_up(&zd->rxdataq); kfree(urb->transfer_buffer); } } static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata, unsigned int riddatalen) { int err; int i = 0; int code; int rid_fid; int length; unsigned char *pdata; zd->rxdatas = 0; err = zd1201_docmd(zd, ZD1201_CMDCODE_ACCESS, rid, 0, 0); if (err) return err; wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen) return -EIO; code = le16_to_cpu(*(__le16*)(&zd->rxdata[4])); rid_fid = le16_to_cpu(*(__le16*)(&zd->rxdata[6])); length = le16_to_cpu(*(__le16*)(&zd->rxdata[8])); if (length > zd->rxlen) length = zd->rxlen-6; /* If access bit is not on, then error */ if ((code & ZD1201_ACCESSBIT) != ZD1201_ACCESSBIT || rid_fid != rid ) return -EINVAL; /* Not enough buffer for allocating data */ if (riddatalen != (length - 4)) { dev_dbg(&zd->usb->dev, "riddatalen mismatches, expected=%u, (packet=%u) length=%u, rid=0x%04X, rid_fid=0x%04X\n", riddatalen, zd->rxlen, length, rid, rid_fid); return 
-ENODATA; } zd->rxdatas = 0; /* Issue SetRxRid commnd */ err = zd1201_docmd(zd, ZD1201_CMDCODE_SETRXRID, rid, 0, length); if (err) return err; /* Receive RID record from resource packets */ wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen) return -EIO; if (zd->rxdata[zd->rxlen - 1] != ZD1201_PACKET_RESOURCE) { dev_dbg(&zd->usb->dev, "Packet type mismatch: 0x%x not 0x3\n", zd->rxdata[zd->rxlen-1]); return -EINVAL; } /* Set the data pointer and received data length */ pdata = zd->rxdata; length = zd->rxlen; do { int actual_length; actual_length = (length > 64) ? 64 : length; if (pdata[0] != 0x3) { dev_dbg(&zd->usb->dev, "Rx Resource packet type error: %02X\n", pdata[0]); return -EINVAL; } if (actual_length != 64) { /* Trim the last packet type byte */ actual_length--; } /* Skip the 4 bytes header (RID length and RID) */ if (i == 0) { pdata += 8; actual_length -= 8; } else { pdata += 4; actual_length -= 4; } memcpy(riddata, pdata, actual_length); riddata += actual_length; pdata += actual_length; length -= 64; i++; } while (length > 0); return 0; } /* * resreq: * byte type * byte sequence * u16 reserved * byte data[12] * total: 16 */ static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int wait) { int err; unsigned char *request; int reqlen; char seq=0; struct urb *urb; gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC; len += 4; /* first 4 are for header */ zd->rxdatas = 0; zd->rxlen = 0; for (seq=0; len > 0; seq++) { request = kmalloc(16, gfp_mask); if (!request) return -ENOMEM; urb = usb_alloc_urb(0, gfp_mask); if (!urb) { kfree(request); return -ENOMEM; } memset(request, 0, 16); reqlen = len>12 ? 
12 : len; request[0] = ZD1201_USB_RESREQ; request[1] = seq; request[2] = 0; request[3] = 0; if (request[1] == 0) { /* add header */ *(__le16*)&request[4] = cpu_to_le16((len-2+1)/2); *(__le16*)&request[6] = cpu_to_le16(rid); memcpy(request+8, buf, reqlen-4); buf += reqlen-4; } else { memcpy(request+4, buf, reqlen); buf += reqlen; } len -= reqlen; usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2), request, 16, zd1201_usbfree, zd); err = usb_submit_urb(urb, gfp_mask); if (err) goto err; } request = kmalloc(16, gfp_mask); if (!request) return -ENOMEM; urb = usb_alloc_urb(0, gfp_mask); if (!urb) { kfree(request); return -ENOMEM; } *((__le32*)request) = cpu_to_le32(ZD1201_USB_CMDREQ); *((__le16*)&request[4]) = cpu_to_le16(ZD1201_CMDCODE_ACCESS|ZD1201_ACCESSBIT); *((__le16*)&request[6]) = cpu_to_le16(rid); *((__le16*)&request[8]) = cpu_to_le16(0); *((__le16*)&request[10]) = cpu_to_le16(0); usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out2), request, 16, zd1201_usbfree, zd); err = usb_submit_urb(urb, gfp_mask); if (err) goto err; if (wait) { wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen || le16_to_cpu(*(__le16*)&zd->rxdata[6]) != rid) { dev_dbg(&zd->usb->dev, "wrong or no RID received\n"); } } return 0; err: kfree(request); usb_free_urb(urb); return err; } static inline int zd1201_getconfig16(struct zd1201 *zd, int rid, short *val) { int err; __le16 zdval; err = zd1201_getconfig(zd, rid, &zdval, sizeof(__le16)); if (err) return err; *val = le16_to_cpu(zdval); return 0; } static inline int zd1201_setconfig16(struct zd1201 *zd, int rid, short val) { __le16 zdval = cpu_to_le16(val); return (zd1201_setconfig(zd, rid, &zdval, sizeof(__le16), 1)); } static int zd1201_drvr_start(struct zd1201 *zd) { int err, i; short max; __le16 zdmax; unsigned char *buffer; buffer = kzalloc(ZD1201_RXSIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; usb_fill_bulk_urb(zd->rx_urb, zd->usb, usb_rcvbulkpipe(zd->usb, zd->endp_in), buffer, 
ZD1201_RXSIZE, zd1201_usbrx, zd); err = usb_submit_urb(zd->rx_urb, GFP_KERNEL); if (err) goto err_buffer; err = zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0); if (err) goto err_urb; err = zd1201_getconfig(zd, ZD1201_RID_CNFMAXTXBUFFERNUMBER, &zdmax, sizeof(__le16)); if (err) goto err_urb; max = le16_to_cpu(zdmax); for (i=0; i<max; i++) { err = zd1201_docmd(zd, ZD1201_CMDCODE_ALLOC, 1514, 0, 0); if (err) goto err_urb; } return 0; err_urb: usb_kill_urb(zd->rx_urb); return err; err_buffer: kfree(buffer); return err; } /* Magic alert: The firmware doesn't seem to like the MAC state being * toggled in promisc (aka monitor) mode. * (It works a number of times, but will halt eventually) * So we turn it of before disabling and on after enabling if needed. */ static int zd1201_enable(struct zd1201 *zd) { int err; if (zd->mac_enabled) return 0; err = zd1201_docmd(zd, ZD1201_CMDCODE_ENABLE, 0, 0, 0); if (!err) zd->mac_enabled = 1; if (zd->monitor) err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 1); return err; } static int zd1201_disable(struct zd1201 *zd) { int err; if (!zd->mac_enabled) return 0; if (zd->monitor) { err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0); if (err) return err; } err = zd1201_docmd(zd, ZD1201_CMDCODE_DISABLE, 0, 0, 0); if (!err) zd->mac_enabled = 0; return err; } static int zd1201_mac_reset(struct zd1201 *zd) { if (!zd->mac_enabled) return 0; zd1201_disable(zd); return zd1201_enable(zd); } static int zd1201_join(struct zd1201 *zd, char *essid, int essidlen) { int err, val; char buf[IW_ESSID_MAX_SIZE+2]; err = zd1201_disable(zd); if (err) return err; val = ZD1201_CNFAUTHENTICATION_OPENSYSTEM; val |= ZD1201_CNFAUTHENTICATION_SHAREDKEY; err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, val); if (err) return err; *(__le16 *)buf = cpu_to_le16(essidlen); memcpy(buf+2, essid, essidlen); if (!zd->ap) { /* Normal station */ err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf, IW_ESSID_MAX_SIZE+2, 1); if (err) return err; } 
else { /* AP */ err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNSSID, buf, IW_ESSID_MAX_SIZE+2, 1); if (err) return err; } err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR, zd->dev->dev_addr, zd->dev->addr_len, 1); if (err) return err; err = zd1201_enable(zd); if (err) return err; msleep(100); return 0; } static int zd1201_net_open(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); /* Start MAC with wildcard if no essid set */ if (!zd->mac_enabled) zd1201_join(zd, zd->essid, zd->essidlen); netif_start_queue(dev); return 0; } static int zd1201_net_stop(struct net_device *dev) { netif_stop_queue(dev); return 0; } /* RFC 1042 encapsulates Ethernet frames in 802.11 frames by prefixing them with 0xaa, 0xaa, 0x03) followed by a SNAP OID of 0 (0x00, 0x00, 0x00). Zd requires an additional padding, copy of ethernet addresses, length of the standard RFC 1042 packet and a command byte (which is nul for tx). tx frame (from Wlan NG): RFC 1042: llc 0xAA 0xAA 0x03 (802.2 LLC) snap 0x00 0x00 0x00 (Ethernet encapsulated) type 2 bytes, Ethernet type field payload (minus eth header) Zydas specific: padding 1B if (skb->len+8+1)%64==0 Eth MAC addr 12 bytes, Ethernet MAC addresses length 2 bytes, RFC 1042 packet length (llc+snap+type+payload) zd 1 null byte, zd1201 packet type */ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); unsigned char *txbuf = zd->txdata; int txbuflen, pad = 0, err; struct urb *urb = zd->tx_urb; if (!zd->mac_enabled || zd->monitor) { dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } netif_stop_queue(dev); txbuflen = skb->len + 8 + 1; if (txbuflen%64 == 0) { pad = 1; txbuflen++; } txbuf[0] = 0xAA; txbuf[1] = 0xAA; txbuf[2] = 0x03; txbuf[3] = 0x00; /* rfc1042 */ txbuf[4] = 0x00; txbuf[5] = 0x00; skb_copy_from_linear_data_offset(skb, 12, txbuf + 6, skb->len - 12); if (pad) txbuf[skb->len-12+6]=0; skb_copy_from_linear_data(skb, txbuf + skb->len - 12 + 6 + pad, 12); 
*(__be16*)&txbuf[skb->len+6+pad] = htons(skb->len-12+6); txbuf[txbuflen-1] = 0; usb_fill_bulk_urb(urb, zd->usb, usb_sndbulkpipe(zd->usb, zd->endp_out), txbuf, txbuflen, zd1201_usbtx, zd); err = usb_submit_urb(zd->tx_urb, GFP_ATOMIC); if (err) { dev->stats.tx_errors++; netif_start_queue(dev); } else { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } kfree_skb(skb); return NETDEV_TX_OK; } static void zd1201_tx_timeout(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); if (!zd) return; dev_warn(&zd->usb->dev, "%s: TX timeout, shooting down urb\n", dev->name); usb_unlink_urb(zd->tx_urb); dev->stats.tx_errors++; /* Restart the timeout to quiet the watchdog: */ dev->trans_start = jiffies; /* prevent tx timeout */ } static int zd1201_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct zd1201 *zd = netdev_priv(dev); int err; if (!zd) return -ENODEV; err = zd1201_setconfig(zd, ZD1201_RID_CNFOWNMACADDR, addr->sa_data, dev->addr_len, 1); if (err) return err; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); return zd1201_mac_reset(zd); } static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); return &zd->iwstats; } static void zd1201_set_multicast(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); struct netdev_hw_addr *ha; unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; int i; if (netdev_mc_count(dev) > ZD1201_MAXMULTI) return; i = 0; netdev_for_each_mc_addr(ha, dev) memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN); zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf, netdev_mc_count(dev) * ETH_ALEN, 0); } static int zd1201_config_commit(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *essid) { struct zd1201 *zd = netdev_priv(dev); return zd1201_mac_reset(zd); } static int zd1201_get_name(struct net_device *dev, struct iw_request_info *info, char *name, char *extra) { strcpy(name, "IEEE 802.11b"); 
return 0; } static int zd1201_set_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short channel = 0; int err; if (freq->e == 0) channel = freq->m; else { channel = ieee80211_freq_to_dsss_chan(freq->m); if (channel < 0) channel = 0; } err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel); if (err) return err; zd1201_mac_reset(zd); return 0; } static int zd1201_get_freq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short channel; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, &channel); if (err) return err; freq->e = 0; freq->m = channel; return 0; } static int zd1201_set_mode(struct net_device *dev, struct iw_request_info *info, __u32 *mode, char *extra) { struct zd1201 *zd = netdev_priv(dev); short porttype, monitor = 0; unsigned char buffer[IW_ESSID_MAX_SIZE+2]; int err; if (zd->ap) { if (*mode != IW_MODE_MASTER) return -EINVAL; return 0; } err = zd1201_setconfig16(zd, ZD1201_RID_PROMISCUOUSMODE, 0); if (err) return err; zd->dev->type = ARPHRD_ETHER; switch(*mode) { case IW_MODE_MONITOR: monitor = 1; zd->dev->type = ARPHRD_IEEE80211; /* Make sure we are no longer associated with by setting an 'impossible' essid. 
(otherwise we mess up firmware) */ zd1201_join(zd, "\0-*#\0", 5); /* Put port in pIBSS */ case 8: /* No pseudo-IBSS in wireless extensions (yet) */ porttype = ZD1201_PORTTYPE_PSEUDOIBSS; break; case IW_MODE_ADHOC: porttype = ZD1201_PORTTYPE_IBSS; break; case IW_MODE_INFRA: porttype = ZD1201_PORTTYPE_BSS; break; default: return -EINVAL; } err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype); if (err) return err; if (zd->monitor && !monitor) { zd1201_disable(zd); *(__le16 *)buffer = cpu_to_le16(zd->essidlen); memcpy(buffer+2, zd->essid, zd->essidlen); err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buffer, IW_ESSID_MAX_SIZE+2, 1); if (err) return err; } zd->monitor = monitor; /* If monitor mode is set we don't actually turn it on here since it * is done during mac reset anyway (see zd1201_mac_enable). */ zd1201_mac_reset(zd); return 0; } static int zd1201_get_mode(struct net_device *dev, struct iw_request_info *info, __u32 *mode, char *extra) { struct zd1201 *zd = netdev_priv(dev); short porttype; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFPORTTYPE, &porttype); if (err) return err; switch(porttype) { case ZD1201_PORTTYPE_IBSS: *mode = IW_MODE_ADHOC; break; case ZD1201_PORTTYPE_BSS: *mode = IW_MODE_INFRA; break; case ZD1201_PORTTYPE_WDS: *mode = IW_MODE_REPEAT; break; case ZD1201_PORTTYPE_PSEUDOIBSS: *mode = 8;/* No Pseudo-IBSS... 
*/ break; case ZD1201_PORTTYPE_AP: *mode = IW_MODE_MASTER; break; default: dev_dbg(&zd->usb->dev, "Unknown porttype: %d\n", porttype); *mode = IW_MODE_AUTO; } if (zd->monitor) *mode = IW_MODE_MONITOR; return 0; } static int zd1201_get_range(struct net_device *dev, struct iw_request_info *info, struct iw_point *wrq, char *extra) { struct iw_range *range = (struct iw_range *)extra; wrq->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); range->we_version_compiled = WIRELESS_EXT; range->we_version_source = WIRELESS_EXT; range->max_qual.qual = 128; range->max_qual.level = 128; range->max_qual.noise = 128; range->max_qual.updated = 7; range->encoding_size[0] = 5; range->encoding_size[1] = 13; range->num_encoding_sizes = 2; range->max_encoding_tokens = ZD1201_NUMKEYS; range->num_bitrates = 4; range->bitrate[0] = 1000000; range->bitrate[1] = 2000000; range->bitrate[2] = 5500000; range->bitrate[3] = 11000000; range->min_rts = 0; range->min_frag = ZD1201_FRAGMIN; range->max_rts = ZD1201_RTSMAX; range->min_frag = ZD1201_FRAGMAX; return 0; } /* Little bit of magic here: we only get the quality if we poll * for it, and we never get an actual request to trigger such * a poll. Therefore we 'assume' that the user will soon ask for * the stats after asking the bssid. */ static int zd1201_get_wap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { struct zd1201 *zd = netdev_priv(dev); unsigned char buffer[6]; if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) { /* Unfortunately the quality and noise reported is useless. 
they seem to be accumulators that increase until you read them, unless we poll on a fixed interval we can't use them */ /*zd->iwstats.qual.qual = le16_to_cpu(((__le16 *)buffer)[0]);*/ zd->iwstats.qual.level = le16_to_cpu(((__le16 *)buffer)[1]); /*zd->iwstats.qual.noise = le16_to_cpu(((__le16 *)buffer)[2]);*/ zd->iwstats.qual.updated = 2; } return zd1201_getconfig(zd, ZD1201_RID_CURRENTBSSID, ap_addr->sa_data, 6); } static int zd1201_set_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *srq, char *extra) { /* We do everything in get_scan */ return 0; } static int zd1201_get_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *srq, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err, i, j, enabled_save; struct iw_event iwe; char *cev = extra; char *end_buf = extra + IW_SCAN_MAX_DATA; /* No scanning in AP mode */ if (zd->ap) return -EOPNOTSUPP; /* Scan doesn't seem to work if disabled */ enabled_save = zd->mac_enabled; zd1201_enable(zd); zd->rxdatas = 0; err = zd1201_docmd(zd, ZD1201_CMDCODE_INQUIRE, ZD1201_INQ_SCANRESULTS, 0, 0); if (err) return err; wait_event_interruptible(zd->rxdataq, zd->rxdatas); if (!zd->rxlen) return -EIO; if (le16_to_cpu(*(__le16*)&zd->rxdata[2]) != ZD1201_INQ_SCANRESULTS) return -EIO; for(i=8; i<zd->rxlen; i+=62) { iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, zd->rxdata+i+6, 6); cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_ADDR_LEN); iwe.cmd = SIOCGIWESSID; iwe.u.data.length = zd->rxdata[i+16]; iwe.u.data.flags = 1; cev = iwe_stream_add_point(info, cev, end_buf, &iwe, zd->rxdata+i+18); iwe.cmd = SIOCGIWMODE; if (zd->rxdata[i+14]&0x01) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_UINT_LEN); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = zd->rxdata[i+0]; iwe.u.freq.e = 0; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_FREQ_LEN); iwe.cmd = 
SIOCGIWRATE; iwe.u.bitrate.fixed = 0; iwe.u.bitrate.disabled = 0; for (j=0; j<10; j++) if (zd->rxdata[i+50+j]) { iwe.u.bitrate.value = (zd->rxdata[i+50+j]&0x7f)*500000; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_PARAM_LEN); } iwe.cmd = SIOCGIWENCODE; iwe.u.data.length = 0; if (zd->rxdata[i+14]&0x10) iwe.u.data.flags = IW_ENCODE_ENABLED; else iwe.u.data.flags = IW_ENCODE_DISABLED; cev = iwe_stream_add_point(info, cev, end_buf, &iwe, NULL); iwe.cmd = IWEVQUAL; iwe.u.qual.qual = zd->rxdata[i+4]; iwe.u.qual.noise= zd->rxdata[i+2]/10-100; iwe.u.qual.level = (256+zd->rxdata[i+4]*100)/255-100; iwe.u.qual.updated = 7; cev = iwe_stream_add_event(info, cev, end_buf, &iwe, IW_EV_QUAL_LEN); } if (!enabled_save) zd1201_disable(zd); srq->length = cev - extra; srq->flags = 0; return 0; } static int zd1201_set_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *essid) { struct zd1201 *zd = netdev_priv(dev); if (data->length > IW_ESSID_MAX_SIZE) return -EINVAL; if (data->length < 1) data->length = 1; zd->essidlen = data->length; memset(zd->essid, 0, IW_ESSID_MAX_SIZE+1); memcpy(zd->essid, essid, data->length); return zd1201_join(zd, zd->essid, zd->essidlen); } static int zd1201_get_essid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *essid) { struct zd1201 *zd = netdev_priv(dev); memcpy(essid, zd->essid, zd->essidlen); data->flags = 1; data->length = zd->essidlen; return 0; } static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *nick) { strcpy(nick, "zd1201"); data->flags = 1; data->length = strlen(nick); return 0; } static int zd1201_set_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short rate; int err; switch (rrq->value) { case 1000000: rate = ZD1201_RATEB1; break; case 2000000: rate = ZD1201_RATEB2; break; case 5500000: rate = ZD1201_RATEB5; break; 
case 11000000: default: rate = ZD1201_RATEB11; break; } if (!rrq->fixed) { /* Also enable all lower bitrates */ rate |= rate-1; } err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, rate); if (err) return err; return zd1201_mac_reset(zd); } static int zd1201_get_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short rate; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CURRENTTXRATE, &rate); if (err) return err; switch(rate) { case 1: rrq->value = 1000000; break; case 2: rrq->value = 2000000; break; case 5: rrq->value = 5500000; break; case 11: rrq->value = 11000000; break; default: rrq->value = 0; } rrq->fixed = 0; rrq->disabled = 0; return 0; } static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err; short val = rts->value; if (rts->disabled || !rts->fixed) val = ZD1201_RTSMAX; if (val > ZD1201_RTSMAX) return -EINVAL; if (val < 0) return -EINVAL; err = zd1201_setconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, val); if (err) return err; return zd1201_mac_reset(zd); } static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra) { struct zd1201 *zd = netdev_priv(dev); short rtst; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFRTSTHRESHOLD, &rtst); if (err) return err; rts->value = rtst; rts->disabled = (rts->value == ZD1201_RTSMAX); rts->fixed = 1; return 0; } static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err; short val = frag->value; if (frag->disabled || !frag->fixed) val = ZD1201_FRAGMAX; if (val > ZD1201_FRAGMAX) return -EINVAL; if (val < ZD1201_FRAGMIN) return -EINVAL; if (val & 1) return -EINVAL; err = zd1201_setconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, val); if (err) return err; return 
zd1201_mac_reset(zd); } static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info, struct iw_param *frag, char *extra) { struct zd1201 *zd = netdev_priv(dev); short fragt; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFFRAGTHRESHOLD, &fragt); if (err) return err; frag->value = fragt; frag->disabled = (frag->value == ZD1201_FRAGMAX); frag->fixed = 1; return 0; } static int zd1201_set_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { return 0; } static int zd1201_get_retry(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { return 0; } static int zd1201_set_encode(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *key) { struct zd1201 *zd = netdev_priv(dev); short i; int err, rid; if (erq->length > ZD1201_MAXKEYLEN) return -EINVAL; i = (erq->flags & IW_ENCODE_INDEX)-1; if (i == -1) { err = zd1201_getconfig16(zd,ZD1201_RID_CNFDEFAULTKEYID,&i); if (err) return err; } else { err = zd1201_setconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, i); if (err) return err; } if (i < 0 || i >= ZD1201_NUMKEYS) return -EINVAL; rid = ZD1201_RID_CNFDEFAULTKEY0 + i; err = zd1201_setconfig(zd, rid, key, erq->length, 1); if (err) return err; zd->encode_keylen[i] = erq->length; memcpy(zd->encode_keys[i], key, erq->length); i=0; if (!(erq->flags & IW_ENCODE_DISABLED & IW_ENCODE_MODE)) { i |= 0x01; zd->encode_enabled = 1; } else zd->encode_enabled = 0; if (erq->flags & IW_ENCODE_RESTRICTED & IW_ENCODE_MODE) { i |= 0x02; zd->encode_restricted = 1; } else zd->encode_restricted = 0; err = zd1201_setconfig16(zd, ZD1201_RID_CNFWEBFLAGS, i); if (err) return err; if (zd->encode_enabled) i = ZD1201_CNFAUTHENTICATION_SHAREDKEY; else i = ZD1201_CNFAUTHENTICATION_OPENSYSTEM; err = zd1201_setconfig16(zd, ZD1201_RID_CNFAUTHENTICATION, i); if (err) return err; return zd1201_mac_reset(zd); } static int zd1201_get_encode(struct net_device *dev, struct iw_request_info 
*info, struct iw_point *erq, char *key) { struct zd1201 *zd = netdev_priv(dev); short i; int err; if (zd->encode_enabled) erq->flags = IW_ENCODE_ENABLED; else erq->flags = IW_ENCODE_DISABLED; if (zd->encode_restricted) erq->flags |= IW_ENCODE_RESTRICTED; else erq->flags |= IW_ENCODE_OPEN; i = (erq->flags & IW_ENCODE_INDEX) -1; if (i == -1) { err = zd1201_getconfig16(zd, ZD1201_RID_CNFDEFAULTKEYID, &i); if (err) return err; } if (i<0 || i>= ZD1201_NUMKEYS) return -EINVAL; erq->flags |= i+1; erq->length = zd->encode_keylen[i]; memcpy(key, zd->encode_keys[i], erq->length); return 0; } static int zd1201_set_power(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short enabled, duration, level; int err; enabled = vwrq->disabled ? 0 : 1; if (enabled) { if (vwrq->flags & IW_POWER_PERIOD) { duration = vwrq->value; err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, duration); if (err) return err; goto out; } if (vwrq->flags & IW_POWER_TIMEOUT) { err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, &duration); if (err) return err; level = vwrq->value * 4 / duration; if (level > 4) level = 4; if (level < 0) level = 0; err = zd1201_setconfig16(zd, ZD1201_RID_CNFPMEPS, level); if (err) return err; goto out; } return -EINVAL; } out: return zd1201_setconfig16(zd, ZD1201_RID_CNFPMENABLED, enabled); } static int zd1201_get_power(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short enabled, level, duration; int err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMENABLED, &enabled); if (err) return err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFPMEPS, &level); if (err) return err; err = zd1201_getconfig16(zd, ZD1201_RID_CNFMAXSLEEPDURATION, &duration); if (err) return err; vwrq->disabled = enabled ? 
0 : 1; if (vwrq->flags & IW_POWER_TYPE) { if (vwrq->flags & IW_POWER_PERIOD) { vwrq->value = duration; vwrq->flags = IW_POWER_PERIOD; } else { vwrq->value = duration * level / 4; vwrq->flags = IW_POWER_TIMEOUT; } } if (vwrq->flags & IW_POWER_MODE) { if (enabled && level) vwrq->flags = IW_POWER_UNICAST_R; else vwrq->flags = IW_POWER_ALL_R; } return 0; } static const iw_handler zd1201_iw_handler[] = { (iw_handler) zd1201_config_commit, /* SIOCSIWCOMMIT */ (iw_handler) zd1201_get_name, /* SIOCGIWNAME */ (iw_handler) NULL, /* SIOCSIWNWID */ (iw_handler) NULL, /* SIOCGIWNWID */ (iw_handler) zd1201_set_freq, /* SIOCSIWFREQ */ (iw_handler) zd1201_get_freq, /* SIOCGIWFREQ */ (iw_handler) zd1201_set_mode, /* SIOCSIWMODE */ (iw_handler) zd1201_get_mode, /* SIOCGIWMODE */ (iw_handler) NULL, /* SIOCSIWSENS */ (iw_handler) NULL, /* SIOCGIWSENS */ (iw_handler) NULL, /* SIOCSIWRANGE */ (iw_handler) zd1201_get_range, /* SIOCGIWRANGE */ (iw_handler) NULL, /* SIOCSIWPRIV */ (iw_handler) NULL, /* SIOCGIWPRIV */ (iw_handler) NULL, /* SIOCSIWSTATS */ (iw_handler) NULL, /* SIOCGIWSTATS */ (iw_handler) NULL, /* SIOCSIWSPY */ (iw_handler) NULL, /* SIOCGIWSPY */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL/*zd1201_set_wap*/, /* SIOCSIWAP */ (iw_handler) zd1201_get_wap, /* SIOCGIWAP */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* SIOCGIWAPLIST */ (iw_handler) zd1201_set_scan, /* SIOCSIWSCAN */ (iw_handler) zd1201_get_scan, /* SIOCGIWSCAN */ (iw_handler) zd1201_set_essid, /* SIOCSIWESSID */ (iw_handler) zd1201_get_essid, /* SIOCGIWESSID */ (iw_handler) NULL, /* SIOCSIWNICKN */ (iw_handler) zd1201_get_nick, /* SIOCGIWNICKN */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) NULL, /* -- hole -- */ (iw_handler) zd1201_set_rate, /* SIOCSIWRATE */ (iw_handler) zd1201_get_rate, /* SIOCGIWRATE */ (iw_handler) zd1201_set_rts, /* SIOCSIWRTS */ (iw_handler) zd1201_get_rts, /* SIOCGIWRTS */ (iw_handler) zd1201_set_frag, /* SIOCSIWFRAG */ 
(iw_handler) zd1201_get_frag, /* SIOCGIWFRAG */ (iw_handler) NULL, /* SIOCSIWTXPOW */ (iw_handler) NULL, /* SIOCGIWTXPOW */ (iw_handler) zd1201_set_retry, /* SIOCSIWRETRY */ (iw_handler) zd1201_get_retry, /* SIOCGIWRETRY */ (iw_handler) zd1201_set_encode, /* SIOCSIWENCODE */ (iw_handler) zd1201_get_encode, /* SIOCGIWENCODE */ (iw_handler) zd1201_set_power, /* SIOCSIWPOWER */ (iw_handler) zd1201_get_power, /* SIOCGIWPOWER */ }; static int zd1201_set_hostauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); if (!zd->ap) return -EOPNOTSUPP; return zd1201_setconfig16(zd, ZD1201_RID_CNFHOSTAUTH, rrq->value); } static int zd1201_get_hostauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short hostauth; int err; if (!zd->ap) return -EOPNOTSUPP; err = zd1201_getconfig16(zd, ZD1201_RID_CNFHOSTAUTH, &hostauth); if (err) return err; rrq->value = hostauth; rrq->fixed = 1; return 0; } static int zd1201_auth_sta(struct net_device *dev, struct iw_request_info *info, struct sockaddr *sta, char *extra) { struct zd1201 *zd = netdev_priv(dev); unsigned char buffer[10]; if (!zd->ap) return -EOPNOTSUPP; memcpy(buffer, sta->sa_data, ETH_ALEN); *(short*)(buffer+6) = 0; /* 0==success, 1==failure */ *(short*)(buffer+8) = 0; return zd1201_setconfig(zd, ZD1201_RID_AUTHENTICATESTA, buffer, 10, 1); } static int zd1201_set_maxassoc(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); int err; if (!zd->ap) return -EOPNOTSUPP; err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value); if (err) return err; return 0; } static int zd1201_get_maxassoc(struct net_device *dev, struct iw_request_info *info, struct iw_param *rrq, char *extra) { struct zd1201 *zd = netdev_priv(dev); short maxassoc; int err; if (!zd->ap) return -EOPNOTSUPP; err = 
zd1201_getconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, &maxassoc); if (err) return err; rrq->value = maxassoc; rrq->fixed = 1; return 0; } static const iw_handler zd1201_private_handler[] = { (iw_handler) zd1201_set_hostauth, /* ZD1201SIWHOSTAUTH */ (iw_handler) zd1201_get_hostauth, /* ZD1201GIWHOSTAUTH */ (iw_handler) zd1201_auth_sta, /* ZD1201SIWAUTHSTA */ (iw_handler) NULL, /* nothing to get */ (iw_handler) zd1201_set_maxassoc, /* ZD1201SIMAXASSOC */ (iw_handler) zd1201_get_maxassoc, /* ZD1201GIMAXASSOC */ }; static const struct iw_priv_args zd1201_private_args[] = { { ZD1201SIWHOSTAUTH, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE, "sethostauth" }, { ZD1201GIWHOSTAUTH, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostauth" }, { ZD1201SIWAUTHSTA, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE, "authstation" }, { ZD1201SIWMAXASSOC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_NONE, "setmaxassoc" }, { ZD1201GIWMAXASSOC, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmaxassoc" }, }; static const struct iw_handler_def zd1201_iw_handlers = { .num_standard = ARRAY_SIZE(zd1201_iw_handler), .num_private = ARRAY_SIZE(zd1201_private_handler), .num_private_args = ARRAY_SIZE(zd1201_private_args), .standard = (iw_handler *)zd1201_iw_handler, .private = (iw_handler *)zd1201_private_handler, .private_args = (struct iw_priv_args *) zd1201_private_args, .get_wireless_stats = zd1201_get_wireless_stats, }; static const struct net_device_ops zd1201_netdev_ops = { .ndo_open = zd1201_net_open, .ndo_stop = zd1201_net_stop, .ndo_start_xmit = zd1201_hard_start_xmit, .ndo_tx_timeout = zd1201_tx_timeout, .ndo_set_rx_mode = zd1201_set_multicast, .ndo_set_mac_address = zd1201_set_mac_address, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static int zd1201_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct zd1201 *zd; struct net_device *dev; struct 
usb_device *usb; int err; short porttype; char buf[IW_ESSID_MAX_SIZE+2]; usb = interface_to_usbdev(interface); dev = alloc_etherdev(sizeof(*zd)); if (!dev) return -ENOMEM; zd = netdev_priv(dev); zd->dev = dev; zd->ap = ap; zd->usb = usb; zd->removed = 0; init_waitqueue_head(&zd->rxdataq); INIT_HLIST_HEAD(&zd->fraglist); err = zd1201_fw_upload(usb, zd->ap); if (err) { dev_err(&usb->dev, "zd1201 firmware upload failed: %d\n", err); goto err_zd; } zd->endp_in = 1; zd->endp_out = 1; zd->endp_out2 = 2; zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL); zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!zd->rx_urb || !zd->tx_urb) goto err_zd; mdelay(100); err = zd1201_drvr_start(zd); if (err) goto err_zd; err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXDATALEN, 2312); if (err) goto err_start; err = zd1201_setconfig16(zd, ZD1201_RID_TXRATECNTL, ZD1201_RATEB1 | ZD1201_RATEB2 | ZD1201_RATEB5 | ZD1201_RATEB11); if (err) goto err_start; dev->netdev_ops = &zd1201_netdev_ops; dev->wireless_handlers = &zd1201_iw_handlers; dev->watchdog_timeo = ZD1201_TX_TIMEOUT; strcpy(dev->name, "wlan%d"); err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR, dev->dev_addr, dev->addr_len); if (err) goto err_start; /* Set wildcard essid to match zd->essid */ *(__le16 *)buf = cpu_to_le16(0); err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf, IW_ESSID_MAX_SIZE+2, 1); if (err) goto err_start; if (zd->ap) porttype = ZD1201_PORTTYPE_AP; else porttype = ZD1201_PORTTYPE_BSS; err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype); if (err) goto err_start; SET_NETDEV_DEV(dev, &usb->dev); err = register_netdev(dev); if (err) goto err_start; dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n", dev->name); usb_set_intfdata(interface, zd); zd1201_enable(zd); /* zd1201 likes to startup enabled, */ zd1201_disable(zd); /* interfering with all the wifis in range */ return 0; err_start: /* Leave the device in reset state */ zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0); err_zd: usb_free_urb(zd->tx_urb); 
usb_free_urb(zd->rx_urb); free_netdev(dev); return err; } static void zd1201_disconnect(struct usb_interface *interface) { struct zd1201 *zd = usb_get_intfdata(interface); struct hlist_node *node, *node2; struct zd1201_frag *frag; if (!zd) return; usb_set_intfdata(interface, NULL); hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) { hlist_del_init(&frag->fnode); kfree_skb(frag->skb); kfree(frag); } if (zd->tx_urb) { usb_kill_urb(zd->tx_urb); usb_free_urb(zd->tx_urb); } if (zd->rx_urb) { usb_kill_urb(zd->rx_urb); usb_free_urb(zd->rx_urb); } if (zd->dev) { unregister_netdev(zd->dev); free_netdev(zd->dev); } } #ifdef CONFIG_PM static int zd1201_suspend(struct usb_interface *interface, pm_message_t message) { struct zd1201 *zd = usb_get_intfdata(interface); netif_device_detach(zd->dev); zd->was_enabled = zd->mac_enabled; if (zd->was_enabled) return zd1201_disable(zd); else return 0; } static int zd1201_resume(struct usb_interface *interface) { struct zd1201 *zd = usb_get_intfdata(interface); if (!zd || !zd->dev) return -ENODEV; netif_device_attach(zd->dev); if (zd->was_enabled) return zd1201_enable(zd); else return 0; } #else #define zd1201_suspend NULL #define zd1201_resume NULL #endif static struct usb_driver zd1201_usb = { .name = "zd1201", .probe = zd1201_probe, .disconnect = zd1201_disconnect, .id_table = zd1201_table, .suspend = zd1201_suspend, .resume = zd1201_resume, }; module_usb_driver(zd1201_usb);
gpl-2.0
nychitman1/android_kernel_htc_flounder
arch/mips/sgi-ip32/ip32-irq.c
4268
12925
/* * Code to handle IP32 IRQs * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000 Harald Koerfgen * Copyright (C) 2001 Keith M Wesolowski */ #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/sched.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <asm/signal.h> #include <asm/time.h> #include <asm/ip32/crime.h> #include <asm/ip32/mace.h> #include <asm/ip32/ip32_ints.h> /* issue a PIO read to make sure no PIO writes are pending */ static void inline flush_crime_bus(void) { crime->control; } static void inline flush_mace_bus(void) { mace->perif.ctrl.misc; } /* * O2 irq map * * IP0 -> software (ignored) * IP1 -> software (ignored) * IP2 -> (irq0) C crime 1.1 all interrupts; crime 1.5 ??? 
* IP3 -> (irq1) X unknown * IP4 -> (irq2) X unknown * IP5 -> (irq3) X unknown * IP6 -> (irq4) X unknown * IP7 -> (irq5) 7 CPU count/compare timer (system timer) * * crime: (C) * * CRIME_INT_STAT 31:0: * * 0 -> 8 Video in 1 * 1 -> 9 Video in 2 * 2 -> 10 Video out * 3 -> 11 Mace ethernet * 4 -> S SuperIO sub-interrupt * 5 -> M Miscellaneous sub-interrupt * 6 -> A Audio sub-interrupt * 7 -> 15 PCI bridge errors * 8 -> 16 PCI SCSI aic7xxx 0 * 9 -> 17 PCI SCSI aic7xxx 1 * 10 -> 18 PCI slot 0 * 11 -> 19 unused (PCI slot 1) * 12 -> 20 unused (PCI slot 2) * 13 -> 21 unused (PCI shared 0) * 14 -> 22 unused (PCI shared 1) * 15 -> 23 unused (PCI shared 2) * 16 -> 24 GBE0 (E) * 17 -> 25 GBE1 (E) * 18 -> 26 GBE2 (E) * 19 -> 27 GBE3 (E) * 20 -> 28 CPU errors * 21 -> 29 Memory errors * 22 -> 30 RE empty edge (E) * 23 -> 31 RE full edge (E) * 24 -> 32 RE idle edge (E) * 25 -> 33 RE empty level * 26 -> 34 RE full level * 27 -> 35 RE idle level * 28 -> 36 unused (software 0) (E) * 29 -> 37 unused (software 1) (E) * 30 -> 38 unused (software 2) - crime 1.5 CPU SysCorError (E) * 31 -> 39 VICE * * S, M, A: Use the MACE ISA interrupt register * MACE_ISA_INT_STAT 31:0 * * 0-7 -> 40-47 Audio * 8 -> 48 RTC * 9 -> 49 Keyboard * 10 -> X Keyboard polled * 11 -> 51 Mouse * 12 -> X Mouse polled * 13-15 -> 53-55 Count/compare timers * 16-19 -> 56-59 Parallel (16 E) * 20-25 -> 60-62 Serial 1 (22 E) * 26-31 -> 66-71 Serial 2 (28 E) * * Note that this means IRQs 12-14, 50, and 52 do not exist. This is a * different IRQ map than IRIX uses, but that's OK as Linux irq handling * is quite different anyway. 
*/ /* Some initial interrupts to set up */ extern irqreturn_t crime_memerr_intr(int irq, void *dev_id); extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id); static struct irqaction memerr_irq = { .handler = crime_memerr_intr, .name = "CRIME memory error", }; static struct irqaction cpuerr_irq = { .handler = crime_cpuerr_intr, .name = "CRIME CPU error", }; /* * This is for pure CRIME interrupts - ie not MACE. The advantage? * We get to split the register in half and do faster lookups. */ static uint64_t crime_mask; static inline void crime_enable_irq(struct irq_data *d) { unsigned int bit = d->irq - CRIME_IRQ_BASE; crime_mask |= 1 << bit; crime->imask = crime_mask; } static inline void crime_disable_irq(struct irq_data *d) { unsigned int bit = d->irq - CRIME_IRQ_BASE; crime_mask &= ~(1 << bit); crime->imask = crime_mask; flush_crime_bus(); } static struct irq_chip crime_level_interrupt = { .name = "IP32 CRIME", .irq_mask = crime_disable_irq, .irq_unmask = crime_enable_irq, }; static void crime_edge_mask_and_ack_irq(struct irq_data *d) { unsigned int bit = d->irq - CRIME_IRQ_BASE; uint64_t crime_int; /* Edge triggered interrupts must be cleared. */ crime_int = crime->hard_int; crime_int &= ~(1 << bit); crime->hard_int = crime_int; crime_disable_irq(d); } static struct irq_chip crime_edge_interrupt = { .name = "IP32 CRIME", .irq_ack = crime_edge_mask_and_ack_irq, .irq_mask = crime_disable_irq, .irq_mask_ack = crime_edge_mask_and_ack_irq, .irq_unmask = crime_enable_irq, }; /* * This is for MACE PCI interrupts. We can decrease bus traffic by masking * as close to the source as possible. This also means we can take the * next chunk of the CRIME register in one piece. 
*/ static unsigned long macepci_mask; static void enable_macepci_irq(struct irq_data *d) { macepci_mask |= MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ); mace->pci.control = macepci_mask; crime_mask |= 1 << (d->irq - CRIME_IRQ_BASE); crime->imask = crime_mask; } static void disable_macepci_irq(struct irq_data *d) { crime_mask &= ~(1 << (d->irq - CRIME_IRQ_BASE)); crime->imask = crime_mask; flush_crime_bus(); macepci_mask &= ~MACEPCI_CONTROL_INT(d->irq - MACEPCI_SCSI0_IRQ); mace->pci.control = macepci_mask; flush_mace_bus(); } static struct irq_chip ip32_macepci_interrupt = { .name = "IP32 MACE PCI", .irq_mask = disable_macepci_irq, .irq_unmask = enable_macepci_irq, }; /* This is used for MACE ISA interrupts. That means bits 4-6 in the * CRIME register. */ #define MACEISA_AUDIO_INT (MACEISA_AUDIO_SW_INT | \ MACEISA_AUDIO_SC_INT | \ MACEISA_AUDIO1_DMAT_INT | \ MACEISA_AUDIO1_OF_INT | \ MACEISA_AUDIO2_DMAT_INT | \ MACEISA_AUDIO2_MERR_INT | \ MACEISA_AUDIO3_DMAT_INT | \ MACEISA_AUDIO3_MERR_INT) #define MACEISA_MISC_INT (MACEISA_RTC_INT | \ MACEISA_KEYB_INT | \ MACEISA_KEYB_POLL_INT | \ MACEISA_MOUSE_INT | \ MACEISA_MOUSE_POLL_INT | \ MACEISA_TIMER0_INT | \ MACEISA_TIMER1_INT | \ MACEISA_TIMER2_INT) #define MACEISA_SUPERIO_INT (MACEISA_PARALLEL_INT | \ MACEISA_PAR_CTXA_INT | \ MACEISA_PAR_CTXB_INT | \ MACEISA_PAR_MERR_INT | \ MACEISA_SERIAL1_INT | \ MACEISA_SERIAL1_TDMAT_INT | \ MACEISA_SERIAL1_TDMAPR_INT | \ MACEISA_SERIAL1_TDMAME_INT | \ MACEISA_SERIAL1_RDMAT_INT | \ MACEISA_SERIAL1_RDMAOR_INT | \ MACEISA_SERIAL2_INT | \ MACEISA_SERIAL2_TDMAT_INT | \ MACEISA_SERIAL2_TDMAPR_INT | \ MACEISA_SERIAL2_TDMAME_INT | \ MACEISA_SERIAL2_RDMAT_INT | \ MACEISA_SERIAL2_RDMAOR_INT) static unsigned long maceisa_mask; static void enable_maceisa_irq(struct irq_data *d) { unsigned int crime_int = 0; pr_debug("maceisa enable: %u\n", d->irq); switch (d->irq) { case MACEISA_AUDIO_SW_IRQ ... MACEISA_AUDIO3_MERR_IRQ: crime_int = MACE_AUDIO_INT; break; case MACEISA_RTC_IRQ ... 
MACEISA_TIMER2_IRQ: crime_int = MACE_MISC_INT; break; case MACEISA_PARALLEL_IRQ ... MACEISA_SERIAL2_RDMAOR_IRQ: crime_int = MACE_SUPERIO_INT; break; } pr_debug("crime_int %08x enabled\n", crime_int); crime_mask |= crime_int; crime->imask = crime_mask; maceisa_mask |= 1 << (d->irq - MACEISA_AUDIO_SW_IRQ); mace->perif.ctrl.imask = maceisa_mask; } static void disable_maceisa_irq(struct irq_data *d) { unsigned int crime_int = 0; maceisa_mask &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ)); if (!(maceisa_mask & MACEISA_AUDIO_INT)) crime_int |= MACE_AUDIO_INT; if (!(maceisa_mask & MACEISA_MISC_INT)) crime_int |= MACE_MISC_INT; if (!(maceisa_mask & MACEISA_SUPERIO_INT)) crime_int |= MACE_SUPERIO_INT; crime_mask &= ~crime_int; crime->imask = crime_mask; flush_crime_bus(); mace->perif.ctrl.imask = maceisa_mask; flush_mace_bus(); } static void mask_and_ack_maceisa_irq(struct irq_data *d) { unsigned long mace_int; /* edge triggered */ mace_int = mace->perif.ctrl.istat; mace_int &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ)); mace->perif.ctrl.istat = mace_int; disable_maceisa_irq(d); } static struct irq_chip ip32_maceisa_level_interrupt = { .name = "IP32 MACE ISA", .irq_mask = disable_maceisa_irq, .irq_unmask = enable_maceisa_irq, }; static struct irq_chip ip32_maceisa_edge_interrupt = { .name = "IP32 MACE ISA", .irq_ack = mask_and_ack_maceisa_irq, .irq_mask = disable_maceisa_irq, .irq_mask_ack = mask_and_ack_maceisa_irq, .irq_unmask = enable_maceisa_irq, }; /* This is used for regular non-ISA, non-PCI MACE interrupts. That means * bits 0-3 and 7 in the CRIME register. 
*/ static void enable_mace_irq(struct irq_data *d) { unsigned int bit = d->irq - CRIME_IRQ_BASE; crime_mask |= (1 << bit); crime->imask = crime_mask; } static void disable_mace_irq(struct irq_data *d) { unsigned int bit = d->irq - CRIME_IRQ_BASE; crime_mask &= ~(1 << bit); crime->imask = crime_mask; flush_crime_bus(); } static struct irq_chip ip32_mace_interrupt = { .name = "IP32 MACE", .irq_mask = disable_mace_irq, .irq_unmask = enable_mace_irq, }; static void ip32_unknown_interrupt(void) { printk("Unknown interrupt occurred!\n"); printk("cp0_status: %08x\n", read_c0_status()); printk("cp0_cause: %08x\n", read_c0_cause()); printk("CRIME intr mask: %016lx\n", crime->imask); printk("CRIME intr status: %016lx\n", crime->istat); printk("CRIME hardware intr register: %016lx\n", crime->hard_int); printk("MACE ISA intr mask: %08lx\n", mace->perif.ctrl.imask); printk("MACE ISA intr status: %08lx\n", mace->perif.ctrl.istat); printk("MACE PCI control register: %08x\n", mace->pci.control); printk("Register dump:\n"); show_regs(get_irq_regs()); printk("Please mail this report to linux-mips@linux-mips.org\n"); printk("Spinning..."); while(1) ; } /* CRIME 1.1 appears to deliver all interrupts to this one pin. */ /* change this to loop over all edge-triggered irqs, exception masked out ones */ static void ip32_irq0(void) { uint64_t crime_int; int irq = 0; /* * Sanity check interrupt numbering enum. * MACE got 32 interrupts and there are 32 MACE ISA interrupts daisy * chained. 
*/ BUILD_BUG_ON(CRIME_VICE_IRQ - MACE_VID_IN1_IRQ != 31); BUILD_BUG_ON(MACEISA_SERIAL2_RDMAOR_IRQ - MACEISA_AUDIO_SW_IRQ != 31); crime_int = crime->istat & crime_mask; /* crime sometime delivers spurious interrupts, ignore them */ if (unlikely(crime_int == 0)) return; irq = MACE_VID_IN1_IRQ + __ffs(crime_int); if (crime_int & CRIME_MACEISA_INT_MASK) { unsigned long mace_int = mace->perif.ctrl.istat; irq = __ffs(mace_int & maceisa_mask) + MACEISA_AUDIO_SW_IRQ; } pr_debug("*irq %u*\n", irq); do_IRQ(irq); } static void ip32_irq1(void) { ip32_unknown_interrupt(); } static void ip32_irq2(void) { ip32_unknown_interrupt(); } static void ip32_irq3(void) { ip32_unknown_interrupt(); } static void ip32_irq4(void) { ip32_unknown_interrupt(); } static void ip32_irq5(void) { do_IRQ(MIPS_CPU_IRQ_BASE + 7); } asmlinkage void plat_irq_dispatch(void) { unsigned int pending = read_c0_status() & read_c0_cause(); if (likely(pending & IE_IRQ0)) ip32_irq0(); else if (unlikely(pending & IE_IRQ1)) ip32_irq1(); else if (unlikely(pending & IE_IRQ2)) ip32_irq2(); else if (unlikely(pending & IE_IRQ3)) ip32_irq3(); else if (unlikely(pending & IE_IRQ4)) ip32_irq4(); else if (likely(pending & IE_IRQ5)) ip32_irq5(); } void __init arch_init_irq(void) { unsigned int irq; /* Install our interrupt handler, then clear and disable all * CRIME and MACE interrupts. */ crime->imask = 0; crime->hard_int = 0; crime->soft_int = 0; mace->perif.ctrl.istat = 0; mace->perif.ctrl.imask = 0; mips_cpu_irq_init(); for (irq = CRIME_IRQ_BASE; irq <= IP32_IRQ_MAX; irq++) { switch (irq) { case MACE_VID_IN1_IRQ ... MACE_PCI_BRIDGE_IRQ: irq_set_chip_and_handler_name(irq, &ip32_mace_interrupt, handle_level_irq, "level"); break; case MACEPCI_SCSI0_IRQ ... 
MACEPCI_SHARED2_IRQ: irq_set_chip_and_handler_name(irq, &ip32_macepci_interrupt, handle_level_irq, "level"); break; case CRIME_CPUERR_IRQ: case CRIME_MEMERR_IRQ: irq_set_chip_and_handler_name(irq, &crime_level_interrupt, handle_level_irq, "level"); break; case CRIME_GBE0_IRQ ... CRIME_GBE3_IRQ: case CRIME_RE_EMPTY_E_IRQ ... CRIME_RE_IDLE_E_IRQ: case CRIME_SOFT0_IRQ ... CRIME_SOFT2_IRQ: case CRIME_VICE_IRQ: irq_set_chip_and_handler_name(irq, &crime_edge_interrupt, handle_edge_irq, "edge"); break; case MACEISA_PARALLEL_IRQ: case MACEISA_SERIAL1_TDMAPR_IRQ: case MACEISA_SERIAL2_TDMAPR_IRQ: irq_set_chip_and_handler_name(irq, &ip32_maceisa_edge_interrupt, handle_edge_irq, "edge"); break; default: irq_set_chip_and_handler_name(irq, &ip32_maceisa_level_interrupt, handle_level_irq, "level"); break; } } setup_irq(CRIME_MEMERR_IRQ, &memerr_irq); setup_irq(CRIME_CPUERR_IRQ, &cpuerr_irq); #define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5) change_c0_status(ST0_IM, ALLINTS); }
gpl-2.0
jollaman999/jolla-kernel_G_Gen2
arch/mips/cavium-octeon/executive/cvmx-spi.c
4780
22409
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * * Support library for the SPI */ #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-config.h> #include <asm/octeon/cvmx-pko.h> #include <asm/octeon/cvmx-spi.h> #include <asm/octeon/cvmx-spxx-defs.h> #include <asm/octeon/cvmx-stxx-defs.h> #include <asm/octeon/cvmx-srxx-defs.h> #define INVOKE_CB(function_p, args...) 
\ do { \ if (function_p) { \ res = function_p(args); \ if (res) \ return res; \ } \ } while (0) #if CVMX_ENABLE_DEBUG_PRINTS static const char *modes[] = { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" }; #endif /* Default callbacks, can be overridden * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks */ static cvmx_spi_callbacks_t cvmx_spi_callbacks = { .reset_cb = cvmx_spi_reset_cb, .calendar_setup_cb = cvmx_spi_calendar_setup_cb, .clock_detect_cb = cvmx_spi_clock_detect_cb, .training_cb = cvmx_spi_training_cb, .calendar_sync_cb = cvmx_spi_calendar_sync_cb, .interface_up_cb = cvmx_spi_interface_up_cb }; /** * Get current SPI4 initialization callbacks * * @callbacks: Pointer to the callbacks structure.to fill * * Returns Pointer to cvmx_spi_callbacks_t structure. */ void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks) { memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks)); } /** * Set new SPI4 initialization callbacks * * @new_callbacks: Pointer to an updated callbacks structure. */ void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks) { memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks)); } /** * Initialize and start the SPI interface. * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). * @timeout: Timeout to wait for clock synchronization in seconds * @num_ports: Number of SPI ports to configure * * Returns Zero on success, negative of failure. 
*/ int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout, int num_ports) { int res = -1; if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) return res; /* Callback to perform SPI4 reset */ INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode); /* Callback to perform calendar setup */ INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode, num_ports); /* Callback to perform clock detection */ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout); /* Callback to perform SPI4 link training */ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout); /* Callback to perform calendar sync */ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode, timeout); /* Callback to handle interface coming up */ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode); return res; } /** * This routine restarts the SPI interface after it has lost synchronization * with its correspondent system. * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). * @timeout: Timeout to wait for clock synchronization in seconds * * Returns Zero on success, negative of failure. 
*/ int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout) { int res = -1; if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))) return res; cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]); /* Callback to perform SPI4 reset */ INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode); /* NOTE: Calendar setup is not performed during restart */ /* Refer to cvmx_spi_start_interface() for the full sequence */ /* Callback to perform clock detection */ INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout); /* Callback to perform SPI4 link training */ INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout); /* Callback to perform calendar sync */ INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode, timeout); /* Callback to handle interface coming up */ INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode); return res; } /** * Callback to perform SPI4 reset * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). 
* * Returns Zero on success, non-zero error code on failure (will cause * SPI initialization to abort) */ int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode) { union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl; union cvmx_spxx_clk_ctl spxx_clk_ctl; union cvmx_spxx_bist_stat spxx_bist_stat; union cvmx_spxx_int_msk spxx_int_msk; union cvmx_stxx_int_msk stxx_int_msk; union cvmx_spxx_trn4_ctl spxx_trn4_ctl; int index; uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; /* Disable SPI error events while we run BIST */ spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface)); cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0); stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface)); cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0); /* Run BIST in the SPI interface */ cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0); cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0); spxx_clk_ctl.u64 = 0; spxx_clk_ctl.s.runbist = 1; cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); cvmx_wait(10 * MS); spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface)); if (spxx_bist_stat.s.stat0) cvmx_dprintf ("ERROR SPI%d: BIST failed on receive datapath FIFO\n", interface); if (spxx_bist_stat.s.stat1) cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n", interface); if (spxx_bist_stat.s.stat2) cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n", interface); /* Clear the calendar table after BIST to fix parity errors */ for (index = 0; index < 32; index++) { union cvmx_srxx_spi4_calx srxx_spi4_calx; union cvmx_stxx_spi4_calx stxx_spi4_calx; srxx_spi4_calx.u64 = 0; srxx_spi4_calx.s.oddpar = 1; cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), srxx_spi4_calx.u64); stxx_spi4_calx.u64 = 0; stxx_spi4_calx.s.oddpar = 1; cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), stxx_spi4_calx.u64); } /* Re enable reporting of error interrupts */ cvmx_write_csr(CVMX_SPXX_INT_REG(interface), cvmx_read_csr(CVMX_SPXX_INT_REG(interface))); 
cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64); cvmx_write_csr(CVMX_STXX_INT_REG(interface), cvmx_read_csr(CVMX_STXX_INT_REG(interface))); cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64); /* Setup the CLKDLY right in the middle */ spxx_clk_ctl.u64 = 0; spxx_clk_ctl.s.seetrn = 0; spxx_clk_ctl.s.clkdly = 0x10; spxx_clk_ctl.s.runbist = 0; spxx_clk_ctl.s.statdrv = 0; /* This should always be on the opposite edge as statdrv */ spxx_clk_ctl.s.statrcv = 1; spxx_clk_ctl.s.sndtrn = 0; spxx_clk_ctl.s.drptrn = 0; spxx_clk_ctl.s.rcvtrn = 0; spxx_clk_ctl.s.srxdlck = 0; cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); cvmx_wait(100 * MS); /* Reset SRX0 DLL */ spxx_clk_ctl.s.srxdlck = 1; cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); /* Waiting for Inf0 Spi4 RX DLL to lock */ cvmx_wait(100 * MS); /* Enable dynamic alignment */ spxx_trn4_ctl.s.trntest = 0; spxx_trn4_ctl.s.jitter = 1; spxx_trn4_ctl.s.clr_boot = 1; spxx_trn4_ctl.s.set_boot = 0; if (OCTEON_IS_MODEL(OCTEON_CN58XX)) spxx_trn4_ctl.s.maxdist = 3; else spxx_trn4_ctl.s.maxdist = 8; spxx_trn4_ctl.s.macro_en = 1; spxx_trn4_ctl.s.mux_en = 1; cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64); spxx_dbg_deskew_ctl.u64 = 0; cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface), spxx_dbg_deskew_ctl.u64); return 0; } /** * Callback to setup calendar and miscellaneous settings before clock detection * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). 
* @num_ports: Number of ports to configure on SPI * * Returns Zero on success, non-zero error code on failure (will cause * SPI initialization to abort) */ int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode, int num_ports) { int port; int index; if (mode & CVMX_SPI_MODE_RX_HALFPLEX) { union cvmx_srxx_com_ctl srxx_com_ctl; union cvmx_srxx_spi4_stat srxx_spi4_stat; /* SRX0 number of Ports */ srxx_com_ctl.u64 = 0; srxx_com_ctl.s.prts = num_ports - 1; srxx_com_ctl.s.st_en = 0; srxx_com_ctl.s.inf_en = 0; cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64); /* SRX0 Calendar Table. This round robbins through all ports */ port = 0; index = 0; while (port < num_ports) { union cvmx_srxx_spi4_calx srxx_spi4_calx; srxx_spi4_calx.u64 = 0; srxx_spi4_calx.s.prt0 = port++; srxx_spi4_calx.s.prt1 = port++; srxx_spi4_calx.s.prt2 = port++; srxx_spi4_calx.s.prt3 = port++; srxx_spi4_calx.s.oddpar = ~(cvmx_dpop(srxx_spi4_calx.u64) & 1); cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface), srxx_spi4_calx.u64); index++; } srxx_spi4_stat.u64 = 0; srxx_spi4_stat.s.len = num_ports; srxx_spi4_stat.s.m = 1; cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface), srxx_spi4_stat.u64); } if (mode & CVMX_SPI_MODE_TX_HALFPLEX) { union cvmx_stxx_arb_ctl stxx_arb_ctl; union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max; union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh; union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl; union cvmx_stxx_spi4_stat stxx_spi4_stat; union cvmx_stxx_spi4_dat stxx_spi4_dat; /* STX0 Config */ stxx_arb_ctl.u64 = 0; stxx_arb_ctl.s.igntpa = 0; stxx_arb_ctl.s.mintrn = 0; cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64); gmxx_tx_spi_max.u64 = 0; gmxx_tx_spi_max.s.max1 = 8; gmxx_tx_spi_max.s.max2 = 4; gmxx_tx_spi_max.s.slice = 0; cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface), gmxx_tx_spi_max.u64); gmxx_tx_spi_thresh.u64 = 0; gmxx_tx_spi_thresh.s.thresh = 4; cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface), gmxx_tx_spi_thresh.u64); gmxx_tx_spi_ctl.u64 = 0; 
gmxx_tx_spi_ctl.s.tpa_clr = 0; gmxx_tx_spi_ctl.s.cont_pkt = 0; cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface), gmxx_tx_spi_ctl.u64); /* STX0 Training Control */ stxx_spi4_dat.u64 = 0; /*Minimum needed by dynamic alignment */ stxx_spi4_dat.s.alpha = 32; stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */ cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface), stxx_spi4_dat.u64); /* STX0 Calendar Table. This round robbins through all ports */ port = 0; index = 0; while (port < num_ports) { union cvmx_stxx_spi4_calx stxx_spi4_calx; stxx_spi4_calx.u64 = 0; stxx_spi4_calx.s.prt0 = port++; stxx_spi4_calx.s.prt1 = port++; stxx_spi4_calx.s.prt2 = port++; stxx_spi4_calx.s.prt3 = port++; stxx_spi4_calx.s.oddpar = ~(cvmx_dpop(stxx_spi4_calx.u64) & 1); cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface), stxx_spi4_calx.u64); index++; } stxx_spi4_stat.u64 = 0; stxx_spi4_stat.s.len = num_ports; stxx_spi4_stat.s.m = 1; cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface), stxx_spi4_stat.u64); } return 0; } /** * Callback to perform clock detection * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). * @timeout: Timeout to wait for clock synchronization in seconds * * Returns Zero on success, non-zero error code on failure (will cause * SPI initialization to abort) */ int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout) { int clock_transitions; union cvmx_spxx_clk_stat stat; uint64_t timeout_time; uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; /* * Regardless of operating mode, both Tx and Rx clocks must be * present for the SPI interface to operate. 
*/ cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface); timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; /* * Require 100 clock transitions in order to avoid any noise * in the beginning. */ clock_transitions = 100; do { stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) { /* * We've seen a clock transition, so decrement * the number we still need. */ clock_transitions--; cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64); stat.s.s4clk0 = 0; stat.s.s4clk1 = 0; } if (cvmx_get_cycle() > timeout_time) { cvmx_dprintf("SPI%d: Timeout\n", interface); return -1; } } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0); cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface); timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; /* * Require 100 clock transitions in order to avoid any noise in the * beginning. */ clock_transitions = 100; do { stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) { /* * We've seen a clock transition, so decrement * the number we still need */ clock_transitions--; cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64); stat.s.d4clk0 = 0; stat.s.d4clk1 = 0; } if (cvmx_get_cycle() > timeout_time) { cvmx_dprintf("SPI%d: Timeout\n", interface); return -1; } } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0); return 0; } /** * Callback to perform link training * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). 
* @timeout: Timeout to wait for link to be trained (in seconds) * * Returns Zero on success, non-zero error code on failure (will cause * SPI initialization to abort) */ int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout) { union cvmx_spxx_trn4_ctl spxx_trn4_ctl; union cvmx_spxx_clk_stat stat; uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; int rx_training_needed; /* SRX0 & STX0 Inf0 Links are configured - begin training */ union cvmx_spxx_clk_ctl spxx_clk_ctl; spxx_clk_ctl.u64 = 0; spxx_clk_ctl.s.seetrn = 0; spxx_clk_ctl.s.clkdly = 0x10; spxx_clk_ctl.s.runbist = 0; spxx_clk_ctl.s.statdrv = 0; /* This should always be on the opposite edge as statdrv */ spxx_clk_ctl.s.statrcv = 1; spxx_clk_ctl.s.sndtrn = 1; spxx_clk_ctl.s.drptrn = 1; spxx_clk_ctl.s.rcvtrn = 1; spxx_clk_ctl.s.srxdlck = 1; cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64); cvmx_wait(1000 * MS); /* SRX0 clear the boot bit */ spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface)); spxx_trn4_ctl.s.clr_boot = 1; cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64); /* Wait for the training sequence to complete */ cvmx_dprintf("SPI%d: Waiting for training\n", interface); cvmx_wait(1000 * MS); /* Wait a really long time here */ timeout_time = cvmx_get_cycle() + 1000ull * MS * 600; /* * The HRM says we must wait for 34 + 16 * MAXDIST training sequences. * We'll be pessimistic and wait for a lot more. 
*/ rx_training_needed = 500; do { stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); if (stat.s.srxtrn && rx_training_needed) { rx_training_needed--; cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64); stat.s.srxtrn = 0; } if (cvmx_get_cycle() > timeout_time) { cvmx_dprintf("SPI%d: Timeout\n", interface); return -1; } } while (stat.s.srxtrn == 0); return 0; } /** * Callback to perform calendar data synchronization * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). * @timeout: Timeout to wait for calendar data in seconds * * Returns Zero on success, non-zero error code on failure (will cause * SPI initialization to abort) */ int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout) { uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000; if (mode & CVMX_SPI_MODE_RX_HALFPLEX) { /* SRX0 interface should be good, send calendar data */ union cvmx_srxx_com_ctl srxx_com_ctl; cvmx_dprintf ("SPI%d: Rx is synchronized, start sending calendar data\n", interface); srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface)); srxx_com_ctl.s.inf_en = 1; srxx_com_ctl.s.st_en = 1; cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64); } if (mode & CVMX_SPI_MODE_TX_HALFPLEX) { /* STX0 has achieved sync */ /* The corespondant board should be sending calendar data */ /* Enable the STX0 STAT receiver. 
*/ union cvmx_spxx_clk_stat stat; uint64_t timeout_time; union cvmx_stxx_com_ctl stxx_com_ctl; stxx_com_ctl.u64 = 0; stxx_com_ctl.s.st_en = 1; cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64); /* Waiting for calendar sync on STX0 STAT */ cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n", interface, interface); timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout; /* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */ do { stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface)); if (cvmx_get_cycle() > timeout_time) { cvmx_dprintf("SPI%d: Timeout\n", interface); return -1; } } while (stat.s.stxcal == 0); } return 0; } /** * Callback to handle interface up * * @interface: The identifier of the packet interface to configure and * use as a SPI interface. * @mode: The operating mode for the SPI interface. The interface * can operate as a full duplex (both Tx and Rx data paths * active) or as a halfplex (either the Tx data path is * active or the Rx data path is active, but not both). 
* * Returns Zero on success, non-zero error code on failure (will cause * SPI initialization to abort) */ int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode) { union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min; union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max; union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber; if (mode & CVMX_SPI_MODE_RX_HALFPLEX) { union cvmx_srxx_com_ctl srxx_com_ctl; srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface)); srxx_com_ctl.s.inf_en = 1; cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64); cvmx_dprintf("SPI%d: Rx is now up\n", interface); } if (mode & CVMX_SPI_MODE_TX_HALFPLEX) { union cvmx_stxx_com_ctl stxx_com_ctl; stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface)); stxx_com_ctl.s.inf_en = 1; cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64); cvmx_dprintf("SPI%d: Tx is now up\n", interface); } gmxx_rxx_frm_min.u64 = 0; gmxx_rxx_frm_min.s.len = 64; cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface), gmxx_rxx_frm_min.u64); gmxx_rxx_frm_max.u64 = 0; gmxx_rxx_frm_max.s.len = 64 * 1024 - 4; cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface), gmxx_rxx_frm_max.u64); gmxx_rxx_jabber.u64 = 0; gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4; cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64); return 0; }
gpl-2.0
olegsvs/android_kernel_ark_benefit_m7
arch/sh/boards/mach-se/7780/setup.c
4780
3029
/* * linux/arch/sh/boards/se/7780/setup.c * * Copyright (C) 2006,2007 Nobuhiro Iwamatsu * * Hitachi UL SolutionEngine 7780 Support. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/platform_device.h> #include <asm/machvec.h> #include <mach-se/mach/se7780.h> #include <asm/io.h> #include <asm/heartbeat.h> /* Heartbeat */ static struct resource heartbeat_resource = { .start = PA_LED, .end = PA_LED, .flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT, }; static struct platform_device heartbeat_device = { .name = "heartbeat", .id = -1, .num_resources = 1, .resource = &heartbeat_resource, }; /* SMC91x */ static struct resource smc91x_eth_resources[] = { [0] = { .name = "smc91x-regs" , .start = PA_LAN + 0x300, .end = PA_LAN + 0x300 + 0x10 , .flags = IORESOURCE_MEM, }, [1] = { .start = SMC_IRQ, .end = SMC_IRQ, .flags = IORESOURCE_IRQ, }, }; static struct platform_device smc91x_eth_device = { .name = "smc91x", .id = 0, .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, }, .num_resources = ARRAY_SIZE(smc91x_eth_resources), .resource = smc91x_eth_resources, }; static struct platform_device *se7780_devices[] __initdata = { &heartbeat_device, &smc91x_eth_device, }; static int __init se7780_devices_setup(void) { return platform_add_devices(se7780_devices, ARRAY_SIZE(se7780_devices)); } device_initcall(se7780_devices_setup); #define GPIO_PHCR 0xFFEA000E #define GPIO_PMSELR 0xFFEA0080 #define GPIO_PECR 0xFFEA0008 static void __init se7780_setup(char **cmdline_p) { /* "SH-Linux" on LED Display */ __raw_writew( 'S' , PA_LED_DISP + (DISP_SEL0_ADDR << 1) ); __raw_writew( 'H' , PA_LED_DISP + (DISP_SEL1_ADDR << 1) ); __raw_writew( '-' , PA_LED_DISP + (DISP_SEL2_ADDR << 1) ); __raw_writew( 'L' , PA_LED_DISP + (DISP_SEL3_ADDR << 1) ); __raw_writew( 'i' , PA_LED_DISP + (DISP_SEL4_ADDR << 1) ); 
__raw_writew( 'n' , PA_LED_DISP + (DISP_SEL5_ADDR << 1) ); __raw_writew( 'u' , PA_LED_DISP + (DISP_SEL6_ADDR << 1) ); __raw_writew( 'x' , PA_LED_DISP + (DISP_SEL7_ADDR << 1) ); printk(KERN_INFO "Hitachi UL Solutions Engine 7780SE03 support.\n"); /* * PCI REQ/GNT setting * REQ0/GNT0 -> USB * REQ1/GNT1 -> PC Card * REQ2/GNT2 -> Serial ATA * REQ3/GNT3 -> PCI slot */ __raw_writew(0x0213, FPGA_REQSEL); /* GPIO setting */ __raw_writew(0x0000, GPIO_PECR); __raw_writew(__raw_readw(GPIO_PHCR)&0xfff3, GPIO_PHCR); __raw_writew(0x0c00, GPIO_PMSELR); /* iVDR Power ON */ __raw_writew(0x0001, FPGA_IVDRPW); } /* * The Machine Vector */ static struct sh_machine_vector mv_se7780 __initmv = { .mv_name = "Solution Engine 7780" , .mv_setup = se7780_setup , .mv_init_irq = init_se7780_IRQ, };
gpl-2.0
djdeeles/android_kernel_lge_g3
lib/debugobjects.c
4780
26845
/* * Generic infrastructure for lifetime debugging of objects. * * Started by Thomas Gleixner * * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> * * For licencing details see kernel-base/COPYING */ #include <linux/debugobjects.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/hash.h> #define ODEBUG_HASH_BITS 14 #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) #define ODEBUG_POOL_SIZE 512 #define ODEBUG_POOL_MIN_LEVEL 256 #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) struct debug_bucket { struct hlist_head list; raw_spinlock_t lock; }; static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; static DEFINE_RAW_SPINLOCK(pool_lock); static HLIST_HEAD(obj_pool); static int obj_pool_min_free = ODEBUG_POOL_SIZE; static int obj_pool_free = ODEBUG_POOL_SIZE; static int obj_pool_used; static int obj_pool_max_used; static struct kmem_cache *obj_cache; static int debug_objects_maxchain __read_mostly; static int debug_objects_fixups __read_mostly; static int debug_objects_warnings __read_mostly; static int debug_objects_enabled __read_mostly = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; static struct debug_obj_descr *descr_test __read_mostly; static void free_obj_work(struct work_struct *work); static DECLARE_WORK(debug_obj_work, free_obj_work); static int __init enable_object_debug(char *str) { debug_objects_enabled = 1; return 0; } static int __init disable_object_debug(char *str) { debug_objects_enabled = 0; return 0; } early_param("debug_objects", enable_object_debug); early_param("no_debug_objects", disable_object_debug); static const char *obj_states[ODEBUG_STATE_MAX] = { [ODEBUG_STATE_NONE] = "none", [ODEBUG_STATE_INIT] = "initialized", [ODEBUG_STATE_INACTIVE] = "inactive", [ODEBUG_STATE_ACTIVE] = 
"active", [ODEBUG_STATE_DESTROYED] = "destroyed", [ODEBUG_STATE_NOTAVAILABLE] = "not available", }; static int fill_pool(void) { gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; struct debug_obj *new; unsigned long flags; if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL)) return obj_pool_free; if (unlikely(!obj_cache)) return obj_pool_free; while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) { new = kmem_cache_zalloc(obj_cache, gfp); if (!new) return obj_pool_free; raw_spin_lock_irqsave(&pool_lock, flags); hlist_add_head(&new->node, &obj_pool); obj_pool_free++; raw_spin_unlock_irqrestore(&pool_lock, flags); } return obj_pool_free; } /* * Lookup an object in the hash bucket. */ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) { struct hlist_node *node; struct debug_obj *obj; int cnt = 0; hlist_for_each_entry(obj, node, &b->list, node) { cnt++; if (obj->object == addr) return obj; } if (cnt > debug_objects_maxchain) debug_objects_maxchain = cnt; return NULL; } /* * Allocate a new object. If the pool is empty, switch off the debugger. * Must be called with interrupts disabled. */ static struct debug_obj * alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) { struct debug_obj *obj = NULL; raw_spin_lock(&pool_lock); if (obj_pool.first) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); obj->object = addr; obj->descr = descr; obj->state = ODEBUG_STATE_NONE; obj->astate = 0; hlist_del(&obj->node); hlist_add_head(&obj->node, &b->list); obj_pool_used++; if (obj_pool_used > obj_pool_max_used) obj_pool_max_used = obj_pool_used; obj_pool_free--; if (obj_pool_free < obj_pool_min_free) obj_pool_min_free = obj_pool_free; } raw_spin_unlock(&pool_lock); return obj; } /* * workqueue function to free objects. 
*/ static void free_obj_work(struct work_struct *work) { struct debug_obj *obj; unsigned long flags; raw_spin_lock_irqsave(&pool_lock, flags); while (obj_pool_free > ODEBUG_POOL_SIZE) { obj = hlist_entry(obj_pool.first, typeof(*obj), node); hlist_del(&obj->node); obj_pool_free--; /* * We release pool_lock across kmem_cache_free() to * avoid contention on pool_lock. */ raw_spin_unlock_irqrestore(&pool_lock, flags); kmem_cache_free(obj_cache, obj); raw_spin_lock_irqsave(&pool_lock, flags); } raw_spin_unlock_irqrestore(&pool_lock, flags); } /* * Put the object back into the pool and schedule work to free objects * if necessary. */ static void free_object(struct debug_obj *obj) { unsigned long flags; int sched = 0; raw_spin_lock_irqsave(&pool_lock, flags); /* * schedule work when the pool is filled and the cache is * initialized: */ if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache) sched = keventd_up() && !work_pending(&debug_obj_work); hlist_add_head(&obj->node, &obj_pool); obj_pool_free++; obj_pool_used--; raw_spin_unlock_irqrestore(&pool_lock, flags); if (sched) schedule_work(&debug_obj_work); } /* * We run out of memory. That means we probably have tons of objects * allocated. */ static void debug_objects_oom(void) { struct debug_bucket *db = obj_hash; struct hlist_node *node, *tmp; HLIST_HEAD(freelist); struct debug_obj *obj; unsigned long flags; int i; printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { raw_spin_lock_irqsave(&db->lock, flags); hlist_move_list(&db->list, &freelist); raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { hlist_del(&obj->node); free_object(obj); } } } /* * We use the pfn of the address for the hash. That way we can check * for freed objects simply by checking the affected bucket. 
*/
/* Map a tracked address to its hash bucket (hashed by chunk, not byte). */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

/*
 * Emit a rate-limited warning about a state machine violation. The
 * static 'limit' caps output at 5 warnings (selftest objects excluded),
 * but debug_objects_warnings is still counted for every call.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	int fixed = 0;

	/* Fixup callbacks are optional; count successful repairs. */
	if (fixup)
		fixed = fixup(addr, state);
	debug_objects_fixups += fixed;
	return fixed;
}

/*
 * Warn (at most 5 times) when the on-stack annotation of an object does
 * not match where the object actually lives.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

/*
 * Common worker for debug_object_init() and debug_object_init_on_stack():
 * look up (or allocate) the tracker for @addr and move it to INIT state,
 * invoking the descriptor's fixup_init callback on an active object.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Refill the free pool before taking the bucket lock. */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable tracking entirely. */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Initializing an active object is a bug; let the type fix it. */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	/* Stand-in descriptor for warnings about untracked objects. */
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: drop the lock before the fixup. */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			/* Only deactivate when no extra usage state is set. */
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Destroying an active object: hand off to the type's fixup. */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Any non-active state: drop the tracker entry. */
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static.  Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			/* Advance the type-private astate only on a match. */
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

#ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Walk every tracking chunk overlapping [address, address + size) and
 * reap the trackers of objects inside that range; active objects are
 * reported and handed to their fixup_free callback first.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	/* Round the span up to whole chunks. */
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				/* Lock was dropped: rescan this bucket. */
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

/* Dump the ODEBUG counters for /sys/kernel/debug/debug_objects/stats. */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Create the debugfs directory and the stats file. */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for
 * the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* marks a "statically initialized" object */
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/* Treat it as a legitimate static object, no warning. */
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * Verify that the test object is in the expected tracking state and that
 * the fixup/warning counters match; on mismatch, disable object debugging.
 */
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

/*
 * Walk the test object through the whole state machine (init, double
 * activate, deactivate, destroy, use-after-destroy, free, static-init
 * activation and - if enabled - free-while-active detection), checking
 * the tracker state and counters after every transition.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats. */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
*/
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Seed the free pool with the statically allocated objects. */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Preallocate all replacements before touching the live pool. */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Roll back any preallocations made before the failure. */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
*/
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		/* Either step failing means tracking cannot continue. */
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}
gpl-2.0
JianguoWEI/Linux-EFQ
drivers/media/dvb/dvb-usb/gl861.c
5036
4812
/* DVB USB compliant linux driver for GL861 USB2.0 devices.
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation, version 2.
 *
 * see Documentation/dvb/README.dvb-usb for more information
 */
#include "gl861.h"

#include "zl10353.h"
#include "qt1010.h"

/* debug */
static int dvb_usb_gl861_debug;
module_param_named(debug, dvb_usb_gl861_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))."
						DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/*
 * Issue a single I2C transaction through the GL861's vendor control
 * request interface. Only 1- or 2-byte writes are supported; the 7-bit
 * I2C address and first data byte are packed into wValue/wIndex.
 *
 * NOTE(review): usb_rcvctrlpipe() is used even for write-only requests;
 * later kernels use usb_sndctrlpipe() for writes - verify against the
 * device before changing.
 */
static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
			 u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	u16 index;
	u16 value = addr << (8 + 1);	/* 7-bit address in the high byte */
	int wo = (rbuf == NULL || rlen == 0); /* write-only */
	u8 req, type;

	if (wo) {
		req = GL861_REQ_I2C_WRITE;
		type = GL861_WRITE;
	} else { /* rw */
		req = GL861_REQ_I2C_READ;
		type = GL861_READ;
	}

	switch (wlen) {
	case 1:
		index = wbuf[0];
		break;
	case 2:
		index = wbuf[0];
		value = value + wbuf[1];
		break;
	default:
		warn("wlen = %x, aborting.", wlen);
		return -EINVAL;
	}

	msleep(1); /* avoid I2C errors */

	return usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), req, type,
			       value, index, rbuf, rlen, 2000);
}

/* I2C */
/*
 * i2c_algorithm master_xfer: handles up to two messages, pairing a write
 * followed by a read into one combined read transaction.
 */
static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
			  int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	int i;

	if (num > 2)
		return -EINVAL;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	for (i = 0; i < num; i++) {
		/* write/read request */
		if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
			if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
					  msg[i].len, msg[i+1].buf,
					  msg[i+1].len) < 0)
				break;
			i++;
		} else
			if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
					  msg[i].len, NULL, 0) < 0)
				break;
	}

	mutex_unlock(&d->i2c_mutex);
	return i;
}

static u32 gl861_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

static struct i2c_algorithm gl861_i2c_algo = {
	.master_xfer   = gl861_i2c_xfer,
	.functionality = gl861_i2c_func,
};

/* Callbacks for DVB USB */
static struct zl10353_config gl861_zl10353_config = {
	.demod_address = 0x0f,
	.no_tuner = 1,
	.parallel_ts = 1,
};

/* Attach the Zarlink ZL10353 DVB-T demodulator. */
static int gl861_frontend_attach(struct dvb_usb_adapter *adap)
{
	adap->fe_adap[0].fe = dvb_attach(zl10353_attach, &gl861_zl10353_config,
		&adap->dev->i2c_adap);
	if (adap->fe_adap[0].fe == NULL)
		return -EIO;

	return 0;
}

static struct qt1010_config gl861_qt1010_config = {
	.i2c_address = 0x62
};

/* Attach the Quantek QT1010 silicon tuner to the frontend. */
static int gl861_tuner_attach(struct dvb_usb_adapter *adap)
{
	return dvb_attach(qt1010_attach,
			  adap->fe_adap[0].fe, &adap->dev->i2c_adap,
			  &gl861_qt1010_config) == NULL ? -ENODEV : 0;
}

/* DVB USB Driver stuff */
static struct dvb_usb_device_properties gl861_properties;

/*
 * USB probe: register the DVB USB device and select alternate setting 0,
 * which carries the bulk transport endpoint.
 */
static int gl861_probe(struct usb_interface *intf,
		       const struct usb_device_id *id)
{
	struct dvb_usb_device *d;
	struct usb_host_interface *alt;
	int ret;

	if (intf->num_altsetting < 2)
		return -ENODEV;

	ret = dvb_usb_device_init(intf, &gl861_properties, THIS_MODULE, &d,
				  adapter_nr);
	if (ret == 0) {
		alt = usb_altnum_to_altsetting(intf, 0);

		if (alt == NULL) {
			deb_rc("not alt found!\n");
			return -ENODEV;
		}
		ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber,
					alt->desc.bAlternateSetting);
	}

	return ret;
}

static struct usb_device_id gl861_table [] = {
	{ USB_DEVICE(USB_VID_MSI, USB_PID_MSI_MEGASKY580_55801) },
	{ USB_DEVICE(USB_VID_ALINK, USB_VID_ALINK_DTU) },
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, gl861_table);

static struct dvb_usb_device_properties gl861_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,

	.size_of_priv = 0,

	.num_adapters = 1,
	.adapter = {{
		.num_frontends = 1,
		.fe = {{
		.frontend_attach = gl861_frontend_attach,
		.tuner_attach = gl861_tuner_attach,

		.stream = {
			.type = USB_BULK,
			.count = 7,
			.endpoint = 0x81,
			.u = {
				.bulk = {
					.buffersize = 512,
				}
			}
		},
		}},
	} },
	.i2c_algo = &gl861_i2c_algo,

	.num_device_descs = 2,
	.devices = {
		{
			.name = "MSI Mega Sky 55801 DVB-T USB2.0",
			.cold_ids = { NULL },
			.warm_ids = { &gl861_table[0], NULL },
		},
		{
			.name = "A-LINK DTU DVB-T USB2.0",
			.cold_ids = { NULL },
			.warm_ids = { &gl861_table[1], NULL },
		},
	}
};

static struct usb_driver gl861_driver = {
	.name		= "dvb_usb_gl861",
	.probe		= gl861_probe,
	.disconnect	= dvb_usb_device_exit,
	.id_table	= gl861_table,
};

module_usb_driver(gl861_driver);

MODULE_AUTHOR("Carl Lundqvist <comabug@gmail.com>");
MODULE_DESCRIPTION("Driver MSI Mega Sky 580 DVB-T USB2.0 / GL861");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
gpl-2.0
Altaf-Mahdi/android_kernel_oneplus_msm8974
drivers/media/dvb/dvb-usb/au6610.c
5036
5910
/*
 * DVB USB Linux driver for Alcor Micro AU6610 DVB-T USB2.0.
 *
 * Copyright (C) 2006 Antti Palosaari <crope@iki.fi>
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "au6610.h"
#include "zl10353.h"
#include "qt1010.h"

/* debug */
static int dvb_usb_au6610_debug;
module_param_named(debug, dvb_usb_au6610_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/*
 * Perform one vendor control request. Both reads and writes go through
 * an IN control transfer; for read operations the requested value is
 * returned in the 5th byte of the transfer buffer.
 */
static int au6610_usb_msg(struct dvb_usb_device *d, u8 operation, u8 addr,
			  u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	int ret;
	u16 index;
	u8 *usb_buf;

	/*
	 * allocate enough for all known requests,
	 * read returns 5 and write 6 bytes
	 */
	usb_buf = kmalloc(6, GFP_KERNEL);
	if (!usb_buf)
		return -ENOMEM;

	switch (wlen) {
	case 1:
		index = wbuf[0] << 8;
		break;
	case 2:
		index = wbuf[0] << 8;
		index += wbuf[1];
		break;
	default:
		warn("wlen = %x, aborting.", wlen);
		ret = -EINVAL;
		goto error;
	}

	ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), operation,
			      USB_TYPE_VENDOR|USB_DIR_IN, addr << 1, index,
			      usb_buf, 6, AU6610_USB_TIMEOUT);

	if (ret < 0)
		goto error;

	switch (operation) {
	case AU6610_REQ_I2C_READ:
	case AU6610_REQ_USB_READ:
		/* requested value is always 5th byte in buffer */
		rbuf[0] = usb_buf[4];
	}

error:
	kfree(usb_buf);
	return ret;
}

/* Dispatch an I2C transaction to the matching vendor request. */
static int au6610_i2c_msg(struct dvb_usb_device *d, u8 addr,
			  u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	u8 request;
	u8 wo = (rbuf == NULL || rlen == 0); /* write-only */

	if (wo) {
		request = AU6610_REQ_I2C_WRITE;
	} else { /* rw */
		request = AU6610_REQ_I2C_READ;
	}

	return au6610_usb_msg(d, request, addr, wbuf, wlen, rbuf, rlen);
}

/* I2C */
/*
 * i2c_algorithm master_xfer: handles up to two messages, pairing a write
 * followed by a read into one combined read transaction.
 */
static int au6610_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
			   int num)
{
	struct dvb_usb_device *d = i2c_get_adapdata(adap);
	int i;

	if (num > 2)
		return -EINVAL;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EAGAIN;

	for (i = 0; i < num; i++) {
		/* write/read request */
		if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
			if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf,
					   msg[i].len, msg[i+1].buf,
					   msg[i+1].len) < 0)
				break;
			i++;
		} else if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf,
					       msg[i].len, NULL, 0) < 0)
				break;
	}

	mutex_unlock(&d->i2c_mutex);
	return i;
}

static u32 au6610_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

static struct i2c_algorithm au6610_i2c_algo = {
	.master_xfer   = au6610_i2c_xfer,
	.functionality = au6610_i2c_func,
};

/* Callbacks for DVB USB */
static struct zl10353_config au6610_zl10353_config = {
	.demod_address = 0x0f,
	.no_tuner = 1,
	.parallel_ts = 1,
};

/* Attach the Zarlink ZL10353 DVB-T demodulator. */
static int au6610_zl10353_frontend_attach(struct dvb_usb_adapter *adap)
{
	adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
					 &au6610_zl10353_config,
					 &adap->dev->i2c_adap);
	if (adap->fe_adap[0].fe == NULL)
		return -ENODEV;

	return 0;
}

static struct qt1010_config au6610_qt1010_config = {
	.i2c_address = 0x62
};

/* Attach the Quantek QT1010 silicon tuner to the frontend. */
static int au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap)
{
	return dvb_attach(qt1010_attach, adap->fe_adap[0].fe,
			  &adap->dev->i2c_adap,
			  &au6610_qt1010_config) == NULL ? -ENODEV : 0;
}

/* DVB USB Driver stuff */
static struct dvb_usb_device_properties au6610_properties;

/*
 * USB probe: register the DVB USB device and select the alternate
 * setting that carries the isochronous transport endpoint.
 */
static int au6610_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct dvb_usb_device *d;
	struct usb_host_interface *alt;
	int ret;

	if (intf->num_altsetting < AU6610_ALTSETTING_COUNT)
		return -ENODEV;

	ret = dvb_usb_device_init(intf, &au6610_properties, THIS_MODULE, &d,
				  adapter_nr);
	if (ret == 0) {
		alt = usb_altnum_to_altsetting(intf, AU6610_ALTSETTING);

		if (alt == NULL) {
			deb_info("%s: no alt found!\n", __func__);
			return -ENODEV;
		}
		ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber,
					alt->desc.bAlternateSetting);
	}

	return ret;
}

static struct usb_device_id au6610_table [] = {
	{ USB_DEVICE(USB_VID_ALCOR_MICRO, USB_PID_SIGMATEK_DVB_110) },
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, au6610_table);

static struct dvb_usb_device_properties au6610_properties = {
	.caps = DVB_USB_IS_AN_I2C_ADAPTER,
	.usb_ctrl = DEVICE_SPECIFIC,
	.size_of_priv = 0,
	.num_adapters = 1,
	.adapter = {
		{
		.num_frontends = 1,
		.fe = {{
			.frontend_attach  = au6610_zl10353_frontend_attach,
			.tuner_attach     = au6610_qt1010_tuner_attach,

			.stream = {
				.type = USB_ISOC,
				.count = 5,
				.endpoint = 0x82,
				.u = {
					.isoc = {
						.framesperurb = 40,
						.framesize = 942,  /* maximum avoiding fragmentation */
						.interval = 1,
					}
				}
			},
		}},
		}
	},
	.i2c_algo = &au6610_i2c_algo,
	.num_device_descs = 1,
	.devices = {
		{
			.name = "Sigmatek DVB-110 DVB-T USB2.0",
			.cold_ids = {NULL},
			.warm_ids = {&au6610_table[0], NULL},
		},
	}
};

static struct usb_driver au6610_driver = {
	.name		= "dvb_usb_au6610",
	.probe		= au6610_probe,
	.disconnect	= dvb_usb_device_exit,
	.id_table	= au6610_table,
};

module_usb_driver(au6610_driver);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
gpl-2.0
xboxfanj/android_kernel_oneplus_msm8974
drivers/net/ethernet/natsemi/xtsonic.c
5036
8392
/* * xtsonic.c * * (C) 2001 - 2007 Tensilica Inc. * Kevin Chea <kchea@yahoo.com> * Marc Gauthier <marc@linux-xtensa.org> * Chris Zankel <chris@zankel.net> * * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de) * * This driver is based on work from Andreas Busse, but most of * the code is rewritten. * * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de) * * A driver for the onboard Sonic ethernet controller on the XT2000. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/gfp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/dma.h> static char xtsonic_string[] = "xtsonic"; extern unsigned xtboard_nvram_valid(void); extern void xtboard_get_ether_addr(unsigned char *buf); #include "sonic.h" /* * According to the documentation for the Sonic ethernet controller, * EOBC should be 760 words (1520 bytes) for 32-bit applications, and, * as such, 2 words less than the buffer size. The value for RBSIZE * defined in sonic.h, however is only 1520. * * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and * RBSIZE 1520 bytes) */ #undef SONIC_RBSIZE #define SONIC_RBSIZE 1524 /* * The chip provides 256 byte register space. 
*/ #define SONIC_MEM_SIZE 0x100 /* * Macros to access SONIC registers */ #define SONIC_READ(reg) \ (0xffff & *((volatile unsigned int *)dev->base_addr+reg)) #define SONIC_WRITE(reg,val) \ *((volatile unsigned int *)dev->base_addr+reg) = val /* Use 0 for production, 1 for verification, and >2 for debug */ #ifdef SONIC_DEBUG static unsigned int sonic_debug = SONIC_DEBUG; #else static unsigned int sonic_debug = 1; #endif /* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. * So we check for known Silicon Revision IDs instead. */ static unsigned short known_revisions[] = { 0x101, /* SONIC 83934 */ 0xffff /* end of list */ }; static int xtsonic_open(struct net_device *dev) { int retval; retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); return -EAGAIN; } retval = sonic_open(dev); if (retval) free_irq(dev->irq, dev); return retval; } static int xtsonic_close(struct net_device *dev) { int err; err = sonic_close(dev); free_irq(dev->irq, dev); return err; } static const struct net_device_ops xtsonic_netdev_ops = { .ndo_open = xtsonic_open, .ndo_stop = xtsonic_close, .ndo_start_xmit = sonic_send_packet, .ndo_get_stats = sonic_get_stats, .ndo_set_rx_mode = sonic_multicast_list, .ndo_tx_timeout = sonic_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, }; static int __init sonic_probe1(struct net_device *dev) { static unsigned version_printed = 0; unsigned int silicon_revision; struct sonic_local *lp = netdev_priv(dev); unsigned int base_addr = dev->base_addr; int i; int err = 0; if (!request_mem_region(base_addr, 0x100, xtsonic_string)) return -EBUSY; /* * get the Silicon Revision ID. If this is one of the known * one assume that we found a SONIC ethernet controller at * the expected location. 
*/ silicon_revision = SONIC_READ(SONIC_SR); if (sonic_debug > 1) printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision); i = 0; while ((known_revisions[i] != 0xffff) && (known_revisions[i] != silicon_revision)) i++; if (known_revisions[i] == 0xffff) { printk("SONIC ethernet controller not found (0x%4x)\n", silicon_revision); return -ENODEV; } if (sonic_debug && version_printed++ == 0) printk(version); /* * Put the sonic into software reset, then retrieve ethernet address. * Note: we are assuming that the boot-loader has initialized the cam. */ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); SONIC_WRITE(SONIC_DCR, SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS); SONIC_WRITE(SONIC_CEP,0); SONIC_WRITE(SONIC_IMR,0); SONIC_WRITE(SONIC_CMD,SONIC_CR_RST); SONIC_WRITE(SONIC_CEP,0); for (i=0; i<3; i++) { unsigned int val = SONIC_READ(SONIC_CAP0-i); dev->dev_addr[i*2] = val; dev->dev_addr[i*2+1] = val >> 8; } /* Initialize the device structure. */ lp->dma_bitmode = SONIC_BITMODE32; /* * Allocate local private descriptor areas in uncached space. * The entire structure must be located within the same 64kb segment. * A simple way to ensure this is to allocate twice the * size of the structure -- given that the structure is * much less than 64 kB, at least one of the halves of * the allocated area will be contained entirely in 64 kB. * We also allocate extra space for a pointer to allow freeing * this structure later on (in xtsonic_cleanup_module()). 
*/ lp->descriptors = dma_alloc_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), &lp->descriptors_laddr, GFP_KERNEL); if (lp->descriptors == NULL) { printk(KERN_ERR "%s: couldn't alloc DMA memory for " " descriptors.\n", dev_name(lp->device)); goto out; } lp->cda = lp->descriptors; lp->tda = lp->cda + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); /* get the virtual dma address */ lp->cda_laddr = lp->descriptors_laddr; lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS * SONIC_BUS_SCALE(lp->dma_bitmode)); lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); dev->netdev_ops = &xtsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* * clear tally counter */ SONIC_WRITE(SONIC_CRCT,0xffff); SONIC_WRITE(SONIC_FAET,0xffff); SONIC_WRITE(SONIC_MPT,0xffff); return 0; out: release_region(dev->base_addr, SONIC_MEM_SIZE); return err; } /* * Probe for a SONIC ethernet controller on an XT2000 board. * Actually probing is superfluous but we're paranoid. 
*/ int __devinit xtsonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; struct resource *resmem, *resirq; int err = 0; if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL) return -ENODEV; if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL) return -ENODEV; if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL) return -ENOMEM; lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); netdev_boot_setup_check(dev); dev->base_addr = resmem->start; dev->irq = resirq->start; if ((err = sonic_probe1(dev))) goto out; if ((err = register_netdev(dev))) goto out1; printk("%s: SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->name, dev->base_addr, dev->dev_addr, dev->irq); return 0; out1: release_region(dev->base_addr, SONIC_MEM_SIZE); out: free_netdev(dev); return err; } MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver"); module_param(sonic_debug, int, 0); MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)"); #include "sonic.c" static int __devexit xtsonic_device_remove (struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sonic_local *lp = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), lp->descriptors, lp->descriptors_laddr); release_region (dev->base_addr, SONIC_MEM_SIZE); free_netdev(dev); return 0; } static struct platform_driver xtsonic_driver = { .probe = xtsonic_probe, .remove = __devexit_p(xtsonic_device_remove), .driver = { .name = xtsonic_string, }, }; module_platform_driver(xtsonic_driver);
gpl-2.0
omnirom/android_kernel_google_msm
arch/unicore32/kernel/hibernate.c
6828
3524
/*
 * linux/arch/unicore32/kernel/hibernate.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
 * Copyright (C) 2001-2010 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/suspend.h>

#include "mach/pm.h"

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

/* CPU register snapshot used by the suspend/resume assembly glue. */
struct swsusp_arch_regs swsusp_arch_regs_cpu0;

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

	/*
	 * With folded pud/pmd levels these offsets are pure pointer
	 * arithmetic on the pgd entry; no allocation happens here,
	 * so this cannot fail.
	 */
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 *
 * Returns the (possibly freshly allocated) pte page for @pmd, or NULL
 * if get_safe_page() cannot supply a resume-safe page.  Callers must
 * check for NULL.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		/* GFP_ATOMIC: we may be called late in resume, no sleeping. */
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));

		/* Sanity check: the entry we installed must map back to it. */
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	/* Table already present (pmd populated earlier); just return it. */
	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	/* Start at the pgd slot that covers the linear map (PAGE_OFFSET). */
	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		/* All of low memory mapped: skip remaining pgd slots. */
		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			pte_t *max_pte;

			if (pfn >= max_low_pfn)
				break;

			/* Map with normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			pte = resume_one_page_table_init(pmd);
			if (!pte)
				return -ENOMEM;

			/* Fill one whole pte page: PTRS_PER_PTE entries. */
			max_pte = pte + PTRS_PER_PTE;
			for (; pte < max_pte; pte++, pfn++) {
				if (pfn >= max_low_pfn)
					break;

				set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
			}
		}
	}
	return 0;
}

/* Nothing to do on unicore32: first-level table needs no special setup. */
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
}

/*
 * Build a temporary, resume-safe identity of the kernel linear map and
 * jump into the assembly restore path.  Returns a negative errno only
 * if the temporary page tables could not be allocated; on success
 * restore_image() does not return here.
 */
int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image(resume_pg_dir, restore_pblist);
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	/* end is rounded up so a partially covered page is still excluded */
	unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= begin_pfn) && (pfn < end_pfn);
}

/* Register state is saved by the assembly glue; nothing extra needed here. */
void save_processor_state(void)
{
}

/* After restore the old mappings are live again: flush stale TLB entries. */
void restore_processor_state(void)
{
	local_flush_tlb_all();
}
gpl-2.0
olefb/android_kernel_sony_msm8974
sound/oss/audio.c
8108
25630
/* * sound/oss/audio.c * * Device file manager for /dev/audio */ /* * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. */ /* * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed) * Thomas Sailer : moved several static variables into struct audio_operations * (which is grossly misnamed btw.) because they have the same * lifetime as the rest in there and dynamic allocation saves * 12k or so * Thomas Sailer : use more logical O_NONBLOCK semantics * Daniel Rodriksson: reworked the use of the device specific copy_user * still generic * Horst von Brand: Add missing #include <linux/string.h> * Chris Rankin : Update the module-usage counter for the coprocessor, * and decrement the counters again if we cannot open * the audio device. */ #include <linux/stddef.h> #include <linux/string.h> #include <linux/kmod.h> #include "sound_config.h" #include "ulaw.h" #include "coproc.h" #define NEUTRAL8 0x80 #define NEUTRAL16 0x00 static int dma_ioctl(int dev, unsigned int cmd, void __user *arg); static int set_format(int dev, int fmt) { if (fmt != AFMT_QUERY) { audio_devs[dev]->local_conversion = 0; if (!(audio_devs[dev]->format_mask & fmt)) /* Not supported */ { if (fmt == AFMT_MU_LAW) { fmt = AFMT_U8; audio_devs[dev]->local_conversion = CNV_MU_LAW; } else fmt = AFMT_U8; /* This is always supported */ } audio_devs[dev]->audio_format = audio_devs[dev]->d->set_bits(dev, fmt); audio_devs[dev]->local_format = fmt; } else return audio_devs[dev]->local_format; if (audio_devs[dev]->local_conversion) return audio_devs[dev]->local_conversion; else return audio_devs[dev]->local_format; } int audio_open(int dev, struct file *file) { int ret; int bits; int dev_type = dev & 0x0f; int mode = translate_mode(file); const struct audio_driver *driver; const struct coproc_operations *coprocessor; dev = dev >> 4; if (dev_type 
== SND_DEV_DSP16) bits = 16; else bits = 8; if (dev < 0 || dev >= num_audiodevs) return -ENXIO; driver = audio_devs[dev]->d; if (!try_module_get(driver->owner)) return -ENODEV; if ((ret = DMAbuf_open(dev, mode)) < 0) goto error_1; if ( (coprocessor = audio_devs[dev]->coproc) != NULL ) { if (!try_module_get(coprocessor->owner)) goto error_2; if ((ret = coprocessor->open(coprocessor->devc, COPR_PCM)) < 0) { printk(KERN_WARNING "Sound: Can't access coprocessor device\n"); goto error_3; } } audio_devs[dev]->local_conversion = 0; if (dev_type == SND_DEV_AUDIO) set_format(dev, AFMT_MU_LAW); else set_format(dev, bits); audio_devs[dev]->audio_mode = AM_NONE; return 0; /* * Clean-up stack: this is what needs (un)doing if * we can't open the audio device ... */ error_3: module_put(coprocessor->owner); error_2: DMAbuf_release(dev, mode); error_1: module_put(driver->owner); return ret; } static void sync_output(int dev) { int p, i; int l; struct dma_buffparms *dmap = audio_devs[dev]->dmap_out; if (dmap->fragment_size <= 0) return; dmap->flags |= DMA_POST; /* Align the write pointer with fragment boundaries */ if ((l = dmap->user_counter % dmap->fragment_size) > 0) { int len; unsigned long offs = dmap->user_counter % dmap->bytes_in_use; len = dmap->fragment_size - l; memset(dmap->raw_buf + offs, dmap->neutral_byte, len); DMAbuf_move_wrpointer(dev, len); } /* * Clean all unused buffer fragments. */ p = dmap->qtail; dmap->flags |= DMA_POST; for (i = dmap->qlen + 1; i < dmap->nbufs; i++) { p = (p + 1) % dmap->nbufs; if (((dmap->raw_buf + p * dmap->fragment_size) + dmap->fragment_size) > (dmap->raw_buf + dmap->buffsize)) printk(KERN_ERR "audio: Buffer error 2\n"); memset(dmap->raw_buf + p * dmap->fragment_size, dmap->neutral_byte, dmap->fragment_size); } dmap->flags |= DMA_DIRTY; } void audio_release(int dev, struct file *file) { const struct coproc_operations *coprocessor; int mode = translate_mode(file); dev = dev >> 4; /* * We do this in DMAbuf_release(). 
Why are we doing it * here? Why don't we test the file mode before setting * both flags? DMAbuf_release() does. * ...pester...pester...pester... */ audio_devs[dev]->dmap_out->closing = 1; audio_devs[dev]->dmap_in->closing = 1; /* * We need to make sure we allocated the dmap_out buffer * before we go mucking around with it in sync_output(). */ if (mode & OPEN_WRITE) sync_output(dev); if ( (coprocessor = audio_devs[dev]->coproc) != NULL ) { coprocessor->close(coprocessor->devc, COPR_PCM); module_put(coprocessor->owner); } DMAbuf_release(dev, mode); module_put(audio_devs[dev]->d->owner); } static void translate_bytes(const unsigned char *table, unsigned char *buff, int n) { unsigned long i; if (n <= 0) return; for (i = 0; i < n; ++i) buff[i] = table[buff[i]]; } int audio_write(int dev, struct file *file, const char __user *buf, int count) { int c, p, l, buf_size, used, returned; int err; char *dma_buf; dev = dev >> 4; p = 0; c = count; if(count < 0) return -EINVAL; if (!(audio_devs[dev]->open_mode & OPEN_WRITE)) return -EPERM; if (audio_devs[dev]->flags & DMA_DUPLEX) audio_devs[dev]->audio_mode |= AM_WRITE; else audio_devs[dev]->audio_mode = AM_WRITE; if (!count) /* Flush output */ { sync_output(dev); return 0; } while (c) { if ((err = DMAbuf_getwrbuffer(dev, &dma_buf, &buf_size, !!(file->f_flags & O_NONBLOCK))) < 0) { /* Handle nonblocking mode */ if ((file->f_flags & O_NONBLOCK) && err == -EAGAIN) return p? p : -EAGAIN; /* No more space. 
Return # of accepted bytes */ return err; } l = c; if (l > buf_size) l = buf_size; returned = l; used = l; if (!audio_devs[dev]->d->copy_user) { if ((dma_buf + l) > (audio_devs[dev]->dmap_out->raw_buf + audio_devs[dev]->dmap_out->buffsize)) { printk(KERN_ERR "audio: Buffer error 3 (%lx,%d), (%lx, %d)\n", (long) dma_buf, l, (long) audio_devs[dev]->dmap_out->raw_buf, (int) audio_devs[dev]->dmap_out->buffsize); return -EDOM; } if (dma_buf < audio_devs[dev]->dmap_out->raw_buf) { printk(KERN_ERR "audio: Buffer error 13 (%lx<%lx)\n", (long) dma_buf, (long) audio_devs[dev]->dmap_out->raw_buf); return -EDOM; } if(copy_from_user(dma_buf, &(buf)[p], l)) return -EFAULT; } else audio_devs[dev]->d->copy_user (dev, dma_buf, 0, buf, p, c, buf_size, &used, &returned, l); l = returned; if (audio_devs[dev]->local_conversion & CNV_MU_LAW) { translate_bytes(ulaw_dsp, (unsigned char *) dma_buf, l); } c -= used; p += used; DMAbuf_move_wrpointer(dev, l); } return count; } int audio_read(int dev, struct file *file, char __user *buf, int count) { int c, p, l; char *dmabuf; int buf_no; dev = dev >> 4; p = 0; c = count; if (!(audio_devs[dev]->open_mode & OPEN_READ)) return -EPERM; if ((audio_devs[dev]->audio_mode & AM_WRITE) && !(audio_devs[dev]->flags & DMA_DUPLEX)) sync_output(dev); if (audio_devs[dev]->flags & DMA_DUPLEX) audio_devs[dev]->audio_mode |= AM_READ; else audio_devs[dev]->audio_mode = AM_READ; while(c) { if ((buf_no = DMAbuf_getrdbuffer(dev, &dmabuf, &l, !!(file->f_flags & O_NONBLOCK))) < 0) { /* * Nonblocking mode handling. Return current # of bytes */ if (p > 0) /* Avoid throwing away data */ return p; /* Return it instead */ if ((file->f_flags & O_NONBLOCK) && buf_no == -EAGAIN) return -EAGAIN; return buf_no; } if (l > c) l = c; /* * Insert any local processing here. 
*/ if (audio_devs[dev]->local_conversion & CNV_MU_LAW) { translate_bytes(dsp_ulaw, (unsigned char *) dmabuf, l); } { char *fixit = dmabuf; if(copy_to_user(&(buf)[p], fixit, l)) return -EFAULT; }; DMAbuf_rmchars(dev, buf_no, l); p += l; c -= l; } return count - c; } int audio_ioctl(int dev, struct file *file, unsigned int cmd, void __user *arg) { int val, count; unsigned long flags; struct dma_buffparms *dmap; int __user *p = arg; dev = dev >> 4; if (_IOC_TYPE(cmd) == 'C') { if (audio_devs[dev]->coproc) /* Coprocessor ioctl */ return audio_devs[dev]->coproc->ioctl(audio_devs[dev]->coproc->devc, cmd, arg, 0); /* else printk(KERN_DEBUG"/dev/dsp%d: No coprocessor for this device\n", dev); */ return -ENXIO; } else switch (cmd) { case SNDCTL_DSP_SYNC: if (!(audio_devs[dev]->open_mode & OPEN_WRITE)) return 0; if (audio_devs[dev]->dmap_out->fragment_size == 0) return 0; sync_output(dev); DMAbuf_sync(dev); DMAbuf_reset(dev); return 0; case SNDCTL_DSP_POST: if (!(audio_devs[dev]->open_mode & OPEN_WRITE)) return 0; if (audio_devs[dev]->dmap_out->fragment_size == 0) return 0; audio_devs[dev]->dmap_out->flags |= DMA_POST | DMA_DIRTY; sync_output(dev); dma_ioctl(dev, SNDCTL_DSP_POST, NULL); return 0; case SNDCTL_DSP_RESET: audio_devs[dev]->audio_mode = AM_NONE; DMAbuf_reset(dev); return 0; case SNDCTL_DSP_GETFMTS: val = audio_devs[dev]->format_mask | AFMT_MU_LAW; break; case SNDCTL_DSP_SETFMT: if (get_user(val, p)) return -EFAULT; val = set_format(dev, val); break; case SNDCTL_DSP_GETISPACE: if (!(audio_devs[dev]->open_mode & OPEN_READ)) return 0; if ((audio_devs[dev]->audio_mode & AM_WRITE) && !(audio_devs[dev]->flags & DMA_DUPLEX)) return -EBUSY; return dma_ioctl(dev, cmd, arg); case SNDCTL_DSP_GETOSPACE: if (!(audio_devs[dev]->open_mode & OPEN_WRITE)) return -EPERM; if ((audio_devs[dev]->audio_mode & AM_READ) && !(audio_devs[dev]->flags & DMA_DUPLEX)) return -EBUSY; return dma_ioctl(dev, cmd, arg); case SNDCTL_DSP_NONBLOCK: spin_lock(&file->f_lock); file->f_flags |= 
O_NONBLOCK; spin_unlock(&file->f_lock); return 0; case SNDCTL_DSP_GETCAPS: val = 1 | DSP_CAP_MMAP; /* Revision level of this ioctl() */ if (audio_devs[dev]->flags & DMA_DUPLEX && audio_devs[dev]->open_mode == OPEN_READWRITE) val |= DSP_CAP_DUPLEX; if (audio_devs[dev]->coproc) val |= DSP_CAP_COPROC; if (audio_devs[dev]->d->local_qlen) /* Device has hidden buffers */ val |= DSP_CAP_BATCH; if (audio_devs[dev]->d->trigger) /* Supports SETTRIGGER */ val |= DSP_CAP_TRIGGER; break; case SOUND_PCM_WRITE_RATE: if (get_user(val, p)) return -EFAULT; val = audio_devs[dev]->d->set_speed(dev, val); break; case SOUND_PCM_READ_RATE: val = audio_devs[dev]->d->set_speed(dev, 0); break; case SNDCTL_DSP_STEREO: if (get_user(val, p)) return -EFAULT; if (val > 1 || val < 0) return -EINVAL; val = audio_devs[dev]->d->set_channels(dev, val + 1) - 1; break; case SOUND_PCM_WRITE_CHANNELS: if (get_user(val, p)) return -EFAULT; val = audio_devs[dev]->d->set_channels(dev, val); break; case SOUND_PCM_READ_CHANNELS: val = audio_devs[dev]->d->set_channels(dev, 0); break; case SOUND_PCM_READ_BITS: val = audio_devs[dev]->d->set_bits(dev, 0); break; case SNDCTL_DSP_SETDUPLEX: if (audio_devs[dev]->open_mode != OPEN_READWRITE) return -EPERM; return (audio_devs[dev]->flags & DMA_DUPLEX) ? 
0 : -EIO; case SNDCTL_DSP_PROFILE: if (get_user(val, p)) return -EFAULT; if (audio_devs[dev]->open_mode & OPEN_WRITE) audio_devs[dev]->dmap_out->applic_profile = val; if (audio_devs[dev]->open_mode & OPEN_READ) audio_devs[dev]->dmap_in->applic_profile = val; return 0; case SNDCTL_DSP_GETODELAY: dmap = audio_devs[dev]->dmap_out; if (!(audio_devs[dev]->open_mode & OPEN_WRITE)) return -EINVAL; if (!(dmap->flags & DMA_ALLOC_DONE)) { val=0; break; } spin_lock_irqsave(&dmap->lock,flags); /* Compute number of bytes that have been played */ count = DMAbuf_get_buffer_pointer (dev, dmap, DMODE_OUTPUT); if (count < dmap->fragment_size && dmap->qhead != 0) count += dmap->bytes_in_use; /* Pointer wrap not handled yet */ count += dmap->byte_counter; /* Subtract current count from the number of bytes written by app */ count = dmap->user_counter - count; if (count < 0) count = 0; spin_unlock_irqrestore(&dmap->lock,flags); val = count; break; default: return dma_ioctl(dev, cmd, arg); } return put_user(val, p); } void audio_init_devices(void) { /* * NOTE! This routine could be called several times during boot. */ } void reorganize_buffers(int dev, struct dma_buffparms *dmap, int recording) { /* * This routine breaks the physical device buffers to logical ones. 
*/ struct audio_operations *dsp_dev = audio_devs[dev]; unsigned i, n; unsigned sr, nc, sz, bsz; sr = dsp_dev->d->set_speed(dev, 0); nc = dsp_dev->d->set_channels(dev, 0); sz = dsp_dev->d->set_bits(dev, 0); if (sz == 8) dmap->neutral_byte = NEUTRAL8; else dmap->neutral_byte = NEUTRAL16; if (sr < 1 || nc < 1 || sz < 1) { /* printk(KERN_DEBUG "Warning: Invalid PCM parameters[%d] sr=%d, nc=%d, sz=%d\n", dev, sr, nc, sz);*/ sr = DSP_DEFAULT_SPEED; nc = 1; sz = 8; } sz = sr * nc * sz; sz /= 8; /* #bits -> #bytes */ dmap->data_rate = sz; if (!dmap->needs_reorg) return; dmap->needs_reorg = 0; if (dmap->fragment_size == 0) { /* Compute the fragment size using the default algorithm */ /* * Compute a buffer size for time not exceeding 1 second. * Usually this algorithm gives a buffer size for 0.5 to 1.0 seconds * of sound (using the current speed, sample size and #channels). */ bsz = dmap->buffsize; while (bsz > sz) bsz /= 2; if (bsz == dmap->buffsize) bsz /= 2; /* Needs at least 2 buffers */ /* * Split the computed fragment to smaller parts. After 3.5a9 * the default subdivision is 4 which should give better * results when recording. */ if (dmap->subdivision == 0) /* Not already set */ { dmap->subdivision = 4; /* Init to the default value */ if ((bsz / dmap->subdivision) > 4096) dmap->subdivision *= 2; if ((bsz / dmap->subdivision) < 4096) dmap->subdivision = 1; } bsz /= dmap->subdivision; if (bsz < 16) bsz = 16; /* Just a sanity check */ dmap->fragment_size = bsz; } else { /* * The process has specified the buffer size with SNDCTL_DSP_SETFRAGMENT or * the buffer size computation has already been done. 
*/ if (dmap->fragment_size > (dmap->buffsize / 2)) dmap->fragment_size = (dmap->buffsize / 2); bsz = dmap->fragment_size; } if (audio_devs[dev]->min_fragment) if (bsz < (1 << audio_devs[dev]->min_fragment)) bsz = 1 << audio_devs[dev]->min_fragment; if (audio_devs[dev]->max_fragment) if (bsz > (1 << audio_devs[dev]->max_fragment)) bsz = 1 << audio_devs[dev]->max_fragment; bsz &= ~0x07; /* Force size which is multiple of 8 bytes */ #ifdef OS_DMA_ALIGN_CHECK OS_DMA_ALIGN_CHECK(bsz); #endif n = dmap->buffsize / bsz; if (n > MAX_SUB_BUFFERS) n = MAX_SUB_BUFFERS; if (n > dmap->max_fragments) n = dmap->max_fragments; if (n < 2) { n = 2; bsz /= 2; } dmap->nbufs = n; dmap->bytes_in_use = n * bsz; dmap->fragment_size = bsz; dmap->max_byte_counter = (dmap->data_rate * 60 * 60) + dmap->bytes_in_use; /* Approximately one hour */ if (dmap->raw_buf) { memset(dmap->raw_buf, dmap->neutral_byte, dmap->bytes_in_use); } for (i = 0; i < dmap->nbufs; i++) { dmap->counts[i] = 0; } dmap->flags |= DMA_ALLOC_DONE | DMA_EMPTY; } static int dma_subdivide(int dev, struct dma_buffparms *dmap, int fact) { if (fact == 0) { fact = dmap->subdivision; if (fact == 0) fact = 1; return fact; } if (dmap->subdivision != 0 || dmap->fragment_size) /* Too late to change */ return -EINVAL; if (fact > MAX_REALTIME_FACTOR) return -EINVAL; if (fact != 1 && fact != 2 && fact != 4 && fact != 8 && fact != 16) return -EINVAL; dmap->subdivision = fact; return fact; } static int dma_set_fragment(int dev, struct dma_buffparms *dmap, int fact) { int bytes, count; if (fact == 0) return -EIO; if (dmap->subdivision != 0 || dmap->fragment_size) /* Too late to change */ return -EINVAL; bytes = fact & 0xffff; count = (fact >> 16) & 0x7fff; if (count == 0) count = MAX_SUB_BUFFERS; else if (count < MAX_SUB_BUFFERS) count++; if (bytes < 4 || bytes > 17) /* <16 || > 512k */ return -EINVAL; if (count < 2) return -EINVAL; if (audio_devs[dev]->min_fragment > 0) if (bytes < audio_devs[dev]->min_fragment) bytes = 
audio_devs[dev]->min_fragment; if (audio_devs[dev]->max_fragment > 0) if (bytes > audio_devs[dev]->max_fragment) bytes = audio_devs[dev]->max_fragment; #ifdef OS_DMA_MINBITS if (bytes < OS_DMA_MINBITS) bytes = OS_DMA_MINBITS; #endif dmap->fragment_size = (1 << bytes); dmap->max_fragments = count; if (dmap->fragment_size > dmap->buffsize) dmap->fragment_size = dmap->buffsize; if (dmap->fragment_size == dmap->buffsize && audio_devs[dev]->flags & DMA_AUTOMODE) dmap->fragment_size /= 2; /* Needs at least 2 buffers */ dmap->subdivision = 1; /* Disable SNDCTL_DSP_SUBDIVIDE */ return bytes | ((count - 1) << 16); } static int dma_ioctl(int dev, unsigned int cmd, void __user *arg) { struct dma_buffparms *dmap_out = audio_devs[dev]->dmap_out; struct dma_buffparms *dmap_in = audio_devs[dev]->dmap_in; struct dma_buffparms *dmap; audio_buf_info info; count_info cinfo; int fact, ret, changed, bits, count, err; unsigned long flags; switch (cmd) { case SNDCTL_DSP_SUBDIVIDE: ret = 0; if (get_user(fact, (int __user *)arg)) return -EFAULT; if (audio_devs[dev]->open_mode & OPEN_WRITE) ret = dma_subdivide(dev, dmap_out, fact); if (ret < 0) return ret; if (audio_devs[dev]->open_mode != OPEN_WRITE || (audio_devs[dev]->flags & DMA_DUPLEX && audio_devs[dev]->open_mode & OPEN_READ)) ret = dma_subdivide(dev, dmap_in, fact); if (ret < 0) return ret; break; case SNDCTL_DSP_GETISPACE: case SNDCTL_DSP_GETOSPACE: dmap = dmap_out; if (cmd == SNDCTL_DSP_GETISPACE && !(audio_devs[dev]->open_mode & OPEN_READ)) return -EINVAL; if (cmd == SNDCTL_DSP_GETOSPACE && !(audio_devs[dev]->open_mode & OPEN_WRITE)) return -EINVAL; if (cmd == SNDCTL_DSP_GETISPACE && audio_devs[dev]->flags & DMA_DUPLEX) dmap = dmap_in; if (dmap->mapping_flags & DMA_MAP_MAPPED) return -EINVAL; if (!(dmap->flags & DMA_ALLOC_DONE)) reorganize_buffers(dev, dmap, (cmd == SNDCTL_DSP_GETISPACE)); info.fragstotal = dmap->nbufs; if (cmd == SNDCTL_DSP_GETISPACE) info.fragments = dmap->qlen; else { if (!DMAbuf_space_in_queue(dev)) 
info.fragments = 0; else { info.fragments = DMAbuf_space_in_queue(dev); if (audio_devs[dev]->d->local_qlen) { int tmp = audio_devs[dev]->d->local_qlen(dev); if (tmp && info.fragments) tmp--; /* * This buffer has been counted twice */ info.fragments -= tmp; } } } if (info.fragments < 0) info.fragments = 0; else if (info.fragments > dmap->nbufs) info.fragments = dmap->nbufs; info.fragsize = dmap->fragment_size; info.bytes = info.fragments * dmap->fragment_size; if (cmd == SNDCTL_DSP_GETISPACE && dmap->qlen) info.bytes -= dmap->counts[dmap->qhead]; else { info.fragments = info.bytes / dmap->fragment_size; info.bytes -= dmap->user_counter % dmap->fragment_size; } if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; case SNDCTL_DSP_SETTRIGGER: if (get_user(bits, (int __user *)arg)) return -EFAULT; bits &= audio_devs[dev]->open_mode; if (audio_devs[dev]->d->trigger == NULL) return -EINVAL; if (!(audio_devs[dev]->flags & DMA_DUPLEX) && (bits & PCM_ENABLE_INPUT) && (bits & PCM_ENABLE_OUTPUT)) return -EINVAL; if (bits & PCM_ENABLE_INPUT) { spin_lock_irqsave(&dmap_in->lock,flags); changed = (audio_devs[dev]->enable_bits ^ bits) & PCM_ENABLE_INPUT; if (changed && audio_devs[dev]->go) { reorganize_buffers(dev, dmap_in, 1); if ((err = audio_devs[dev]->d->prepare_for_input(dev, dmap_in->fragment_size, dmap_in->nbufs)) < 0) { spin_unlock_irqrestore(&dmap_in->lock,flags); return err; } dmap_in->dma_mode = DMODE_INPUT; audio_devs[dev]->enable_bits |= PCM_ENABLE_INPUT; DMAbuf_activate_recording(dev, dmap_in); } else audio_devs[dev]->enable_bits &= ~PCM_ENABLE_INPUT; spin_unlock_irqrestore(&dmap_in->lock,flags); } if (bits & PCM_ENABLE_OUTPUT) { spin_lock_irqsave(&dmap_out->lock,flags); changed = (audio_devs[dev]->enable_bits ^ bits) & PCM_ENABLE_OUTPUT; if (changed && (dmap_out->mapping_flags & DMA_MAP_MAPPED || dmap_out->qlen > 0) && audio_devs[dev]->go) { if (!(dmap_out->flags & DMA_ALLOC_DONE)) reorganize_buffers(dev, dmap_out, 0); dmap_out->dma_mode = 
DMODE_OUTPUT; audio_devs[dev]->enable_bits |= PCM_ENABLE_OUTPUT; dmap_out->counts[dmap_out->qhead] = dmap_out->fragment_size; DMAbuf_launch_output(dev, dmap_out); } else audio_devs[dev]->enable_bits &= ~PCM_ENABLE_OUTPUT; spin_unlock_irqrestore(&dmap_out->lock,flags); } #if 0 if (changed && audio_devs[dev]->d->trigger) audio_devs[dev]->d->trigger(dev, bits * audio_devs[dev]->go); #endif /* Falls through... */ case SNDCTL_DSP_GETTRIGGER: ret = audio_devs[dev]->enable_bits; break; case SNDCTL_DSP_SETSYNCRO: if (!audio_devs[dev]->d->trigger) return -EINVAL; audio_devs[dev]->d->trigger(dev, 0); audio_devs[dev]->go = 0; return 0; case SNDCTL_DSP_GETIPTR: if (!(audio_devs[dev]->open_mode & OPEN_READ)) return -EINVAL; spin_lock_irqsave(&dmap_in->lock,flags); cinfo.bytes = dmap_in->byte_counter; cinfo.ptr = DMAbuf_get_buffer_pointer(dev, dmap_in, DMODE_INPUT) & ~3; if (cinfo.ptr < dmap_in->fragment_size && dmap_in->qtail != 0) cinfo.bytes += dmap_in->bytes_in_use; /* Pointer wrap not handled yet */ cinfo.blocks = dmap_in->qlen; cinfo.bytes += cinfo.ptr; if (dmap_in->mapping_flags & DMA_MAP_MAPPED) dmap_in->qlen = 0; /* Reset interrupt counter */ spin_unlock_irqrestore(&dmap_in->lock,flags); if (copy_to_user(arg, &cinfo, sizeof(cinfo))) return -EFAULT; return 0; case SNDCTL_DSP_GETOPTR: if (!(audio_devs[dev]->open_mode & OPEN_WRITE)) return -EINVAL; spin_lock_irqsave(&dmap_out->lock,flags); cinfo.bytes = dmap_out->byte_counter; cinfo.ptr = DMAbuf_get_buffer_pointer(dev, dmap_out, DMODE_OUTPUT) & ~3; if (cinfo.ptr < dmap_out->fragment_size && dmap_out->qhead != 0) cinfo.bytes += dmap_out->bytes_in_use; /* Pointer wrap not handled yet */ cinfo.blocks = dmap_out->qlen; cinfo.bytes += cinfo.ptr; if (dmap_out->mapping_flags & DMA_MAP_MAPPED) dmap_out->qlen = 0; /* Reset interrupt counter */ spin_unlock_irqrestore(&dmap_out->lock,flags); if (copy_to_user(arg, &cinfo, sizeof(cinfo))) return -EFAULT; return 0; case SNDCTL_DSP_GETODELAY: if (!(audio_devs[dev]->open_mode & 
OPEN_WRITE)) return -EINVAL; if (!(dmap_out->flags & DMA_ALLOC_DONE)) { ret=0; break; } spin_lock_irqsave(&dmap_out->lock,flags); /* Compute number of bytes that have been played */ count = DMAbuf_get_buffer_pointer (dev, dmap_out, DMODE_OUTPUT); if (count < dmap_out->fragment_size && dmap_out->qhead != 0) count += dmap_out->bytes_in_use; /* Pointer wrap not handled yet */ count += dmap_out->byte_counter; /* Subtract current count from the number of bytes written by app */ count = dmap_out->user_counter - count; if (count < 0) count = 0; spin_unlock_irqrestore(&dmap_out->lock,flags); ret = count; break; case SNDCTL_DSP_POST: if (audio_devs[dev]->dmap_out->qlen > 0) if (!(audio_devs[dev]->dmap_out->flags & DMA_ACTIVE)) DMAbuf_launch_output(dev, audio_devs[dev]->dmap_out); return 0; case SNDCTL_DSP_GETBLKSIZE: dmap = dmap_out; if (audio_devs[dev]->open_mode & OPEN_WRITE) reorganize_buffers(dev, dmap_out, (audio_devs[dev]->open_mode == OPEN_READ)); if (audio_devs[dev]->open_mode == OPEN_READ || (audio_devs[dev]->flags & DMA_DUPLEX && audio_devs[dev]->open_mode & OPEN_READ)) reorganize_buffers(dev, dmap_in, (audio_devs[dev]->open_mode == OPEN_READ)); if (audio_devs[dev]->open_mode == OPEN_READ) dmap = dmap_in; ret = dmap->fragment_size; break; case SNDCTL_DSP_SETFRAGMENT: ret = 0; if (get_user(fact, (int __user *)arg)) return -EFAULT; if (audio_devs[dev]->open_mode & OPEN_WRITE) ret = dma_set_fragment(dev, dmap_out, fact); if (ret < 0) return ret; if (audio_devs[dev]->open_mode == OPEN_READ || (audio_devs[dev]->flags & DMA_DUPLEX && audio_devs[dev]->open_mode & OPEN_READ)) ret = dma_set_fragment(dev, dmap_in, fact); if (ret < 0) return ret; if (!arg) /* don't know what this is good for, but preserve old semantics */ return 0; break; default: if (!audio_devs[dev]->d->ioctl) return -EINVAL; return audio_devs[dev]->d->ioctl(dev, cmd, arg); } return put_user(ret, (int __user *)arg); }
gpl-2.0