repo_name
string
path
string
copies
string
size
string
content
string
license
string
yajnab/android_kernel_htc_magnids
drivers/usb/atm/ueagle-atm.c
2523
69266
/*-
 * Copyright (c) 2003, 2004
 * Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
 *
 * Copyright (c) 2005-2007 Matthieu Castet <castet.matthieu@free.fr>
 * Copyright (c) 2005-2007 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * GPL license :
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 *
 * HISTORY : some part of the code was base on ueagle 1.3 BSD driver,
 * Damien Bergamini agree to put his code under a DUAL GPL/BSD license.
 *
 * The rest of the code was rewritten from scratch.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>

#include "usbatm.h"

#define EAGLEUSBVERSION "ueagle 1.4"


/*
 * Debug macros
 *
 * Gated on the module parameter `debug` (declared further down in this
 * file): level >= 1 enables uea_dbg, level >= 2 enables uea_vdbg and the
 * function enter/leave tracing.
 */
#define uea_dbg(usb_dev, format, args...) \
	do { \
		if (debug >= 1) \
			dev_dbg(&(usb_dev)->dev, \
				"[ueagle-atm dbg] %s: " format, \
					__func__, ##args); \
	} while (0)

#define uea_vdbg(usb_dev, format, args...) \
	do { \
		if (debug >= 2) \
			dev_dbg(&(usb_dev)->dev, \
				"[ueagle-atm vdbg] " format, ##args); \
	} while (0)

#define uea_enters(usb_dev) \
	uea_vdbg(usb_dev, "entering %s\n" , __func__)

#define uea_leaves(usb_dev) \
	uea_vdbg(usb_dev, "leaving %s\n" , __func__)

#define uea_err(usb_dev, format, args...) \
	dev_err(&(usb_dev)->dev , "[UEAGLE-ATM] " format , ##args)

#define uea_warn(usb_dev, format, args...) \
	dev_warn(&(usb_dev)->dev , "[Ueagle-atm] " format, ##args)

#define uea_info(usb_dev, format, args...) \
	dev_info(&(usb_dev)->dev , "[ueagle-atm] " format, ##args)

/* forward declaration; full layout is defined later in this file */
struct intr_pkt;

/* cmv's from firmware */
/* on-the-wire records parsed from the CMV configuration file; layouts are
 * packed because they mirror the file format exactly */
struct uea_cmvs_v1 {
	u32 address;
	u16 offset;
	u32 data;
} __attribute__ ((packed));

struct uea_cmvs_v2 {
	u32 group;
	u32 address;
	u32 offset;
	u32 data;
} __attribute__ ((packed));

/* information about currently processed cmv */
struct cmv_dsc_e1 {
	u8 function;
	u16 idx;	/* request sequence index echoed in the reply */
	u32 address;
	u16 offset;
};

struct cmv_dsc_e4 {
	u16 function;
	u16 offset;
	u16 address;
	u16 group;
};

/* one descriptor slot, interpreted per chip generation (Eagle I-III vs IV) */
union cmv_dsc {
	struct cmv_dsc_e1 e1;
	struct cmv_dsc_e4 e4;
};

/*
 * Per-modem driver state, shared between the USB glue, the DSP page
 * loader workqueue and the status-polling kernel thread.
 */
struct uea_softc {
	struct usb_device *usb_dev;
	struct usbatm_data *usbatm;

	int modem_index;		/* index into the module parameter arrays */
	unsigned int driver_info;	/* chip version + PSTFIRM/annex flag bits */
	int annex;
#define ANNEXA 0
#define ANNEXB 1

	int booting;
	int reset;

	/* waiters: CMV ack (cmv_ack) and line-sync watchers */
	wait_queue_head_t sync_q;

	struct task_struct *kthread;
	u32 data;	/* payload of the last CMV reply */
	u32 data1;	/* second reply word (e4 reads with size > 2) */

	int cmv_ack;	/* set by wake_up_cmv_ack() when a reply arrived */
	union cmv_dsc cmv_dsc;

	/* DSP page upload request handed to the workqueue */
	struct work_struct task;
	u16 pageno;
	u16 ovl;

	const struct firmware *dsp_firm;	/* held until modem is operational */
	struct urb *urb_int;

	/* chip-generation dispatch (e1 vs e4 implementations) */
	void (*dispatch_cmv) (struct uea_softc *, struct intr_pkt *);
	void (*schedule_load_page) (struct uea_softc *, struct intr_pkt *);
	int (*stat) (struct uea_softc *);
	int (*send_cmvs) (struct uea_softc *);

	/* keep in sync with eaglectl */
	struct uea_stats {
		struct {
			u32 state;
			u32 flags;
			u32 mflags;	/* flags accumulated since last reset */
			u32 vidcpe;
			u32 vidco;
			u32 dsrate;
			u32 usrate;
			u32 dsunc;
			u32 usunc;
			u32 dscorr;
			u32 uscorr;
			u32 txflow;
			u32 rxflow;
			u32 usattenuation;
			u32 dsattenuation;
			u32 dsmargin;
			u32 usmargin;
			u32 firmid;
		} phy;
	} stats;
};

/*
 * Elsa IDs
 */
#define ELSA_VID		0x05CC
#define ELSA_PID_PSTFIRM	0x3350
#define ELSA_PID_PREFIRM	0x3351

#define ELSA_PID_A_PREFIRM	0x3352
#define ELSA_PID_A_PSTFIRM	0x3353
#define ELSA_PID_B_PREFIRM	0x3362
#define ELSA_PID_B_PSTFIRM	0x3363

/*
 * Devolo IDs : pots if (pid & 0x10)
 */
#define DEVOLO_VID			0x1039
#define DEVOLO_EAGLE_I_A_PID_PSTFIRM	0x2110
#define DEVOLO_EAGLE_I_A_PID_PREFIRM	0x2111
#define DEVOLO_EAGLE_I_B_PID_PSTFIRM	0x2100
#define DEVOLO_EAGLE_I_B_PID_PREFIRM 0x2101 #define DEVOLO_EAGLE_II_A_PID_PSTFIRM 0x2130 #define DEVOLO_EAGLE_II_A_PID_PREFIRM 0x2131 #define DEVOLO_EAGLE_II_B_PID_PSTFIRM 0x2120 #define DEVOLO_EAGLE_II_B_PID_PREFIRM 0x2121 /* * Reference design USB IDs */ #define ANALOG_VID 0x1110 #define ADI930_PID_PREFIRM 0x9001 #define ADI930_PID_PSTFIRM 0x9000 #define EAGLE_I_PID_PREFIRM 0x9010 /* Eagle I */ #define EAGLE_I_PID_PSTFIRM 0x900F /* Eagle I */ #define EAGLE_IIC_PID_PREFIRM 0x9024 /* Eagle IIC */ #define EAGLE_IIC_PID_PSTFIRM 0x9023 /* Eagle IIC */ #define EAGLE_II_PID_PREFIRM 0x9022 /* Eagle II */ #define EAGLE_II_PID_PSTFIRM 0x9021 /* Eagle II */ #define EAGLE_III_PID_PREFIRM 0x9032 /* Eagle III */ #define EAGLE_III_PID_PSTFIRM 0x9031 /* Eagle III */ #define EAGLE_IV_PID_PREFIRM 0x9042 /* Eagle IV */ #define EAGLE_IV_PID_PSTFIRM 0x9041 /* Eagle IV */ /* * USR USB IDs */ #define USR_VID 0x0BAF #define MILLER_A_PID_PREFIRM 0x00F2 #define MILLER_A_PID_PSTFIRM 0x00F1 #define MILLER_B_PID_PREFIRM 0x00FA #define MILLER_B_PID_PSTFIRM 0x00F9 #define HEINEKEN_A_PID_PREFIRM 0x00F6 #define HEINEKEN_A_PID_PSTFIRM 0x00F5 #define HEINEKEN_B_PID_PREFIRM 0x00F8 #define HEINEKEN_B_PID_PSTFIRM 0x00F7 #define PREFIRM 0 #define PSTFIRM (1<<7) #define AUTO_ANNEX_A (1<<8) #define AUTO_ANNEX_B (1<<9) enum { ADI930 = 0, EAGLE_I, EAGLE_II, EAGLE_III, EAGLE_IV }; /* macros for both struct usb_device_id and struct uea_softc */ #define UEA_IS_PREFIRM(x) \ (!((x)->driver_info & PSTFIRM)) #define UEA_CHIP_VERSION(x) \ ((x)->driver_info & 0xf) #define IS_ISDN(x) \ ((x)->annex & ANNEXB) #define INS_TO_USBDEV(ins) (ins->usb_dev) #define GET_STATUS(data) \ ((data >> 8) & 0xf) #define IS_OPERATIONAL(sc) \ ((UEA_CHIP_VERSION(sc) != EAGLE_IV) ? \ (GET_STATUS(sc->stats.phy.state) == 2) : \ (sc->stats.phy.state == 7)) /* * Set of macros to handle unaligned data in the firmware blob. * The FW_GET_BYTE() macro is provided only for consistency. 
*/ #define FW_GET_BYTE(p) (*((__u8 *) (p))) #define FW_DIR "ueagle-atm/" #define UEA_FW_NAME_MAX 30 #define NB_MODEM 4 #define BULK_TIMEOUT 300 #define CTRL_TIMEOUT 1000 #define ACK_TIMEOUT msecs_to_jiffies(3000) #define UEA_INTR_IFACE_NO 0 #define UEA_US_IFACE_NO 1 #define UEA_DS_IFACE_NO 2 #define FASTEST_ISO_INTF 8 #define UEA_BULK_DATA_PIPE 0x02 #define UEA_IDMA_PIPE 0x04 #define UEA_INTR_PIPE 0x04 #define UEA_ISO_DATA_PIPE 0x08 #define UEA_E1_SET_BLOCK 0x0001 #define UEA_E4_SET_BLOCK 0x002c #define UEA_SET_MODE 0x0003 #define UEA_SET_2183_DATA 0x0004 #define UEA_SET_TIMEOUT 0x0011 #define UEA_LOOPBACK_OFF 0x0002 #define UEA_LOOPBACK_ON 0x0003 #define UEA_BOOT_IDMA 0x0006 #define UEA_START_RESET 0x0007 #define UEA_END_RESET 0x0008 #define UEA_SWAP_MAILBOX (0x3fcd | 0x4000) #define UEA_MPTX_START (0x3fce | 0x4000) #define UEA_MPTX_MAILBOX (0x3fd6 | 0x4000) #define UEA_MPRX_MAILBOX (0x3fdf | 0x4000) /* block information in eagle4 dsp firmware */ struct block_index { __le32 PageOffset; __le32 NotLastBlock; __le32 dummy; __le32 PageSize; __le32 PageAddress; __le16 dummy1; __le16 PageNumber; } __attribute__ ((packed)); #define E4_IS_BOOT_PAGE(PageSize) ((le32_to_cpu(PageSize)) & 0x80000000) #define E4_PAGE_BYTES(PageSize) ((le32_to_cpu(PageSize) & 0x7fffffff) * 4) #define E4_L1_STRING_HEADER 0x10 #define E4_MAX_PAGE_NUMBER 0x58 #define E4_NO_SWAPPAGE_HEADERS 0x31 /* l1_code is eagle4 dsp firmware format */ struct l1_code { u8 string_header[E4_L1_STRING_HEADER]; u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER]; struct block_index page_header[E4_NO_SWAPPAGE_HEADERS]; u8 code[0]; } __attribute__ ((packed)); /* structures describing a block within a DSP page */ struct block_info_e1 { __le16 wHdr; __le16 wAddress; __le16 wSize; __le16 wOvlOffset; __le16 wOvl; /* overlay */ __le16 wLast; } __attribute__ ((packed)); #define E1_BLOCK_INFO_SIZE 12 struct block_info_e4 { __be16 wHdr; __u8 bBootPage; __u8 bPageNumber; __be32 dwSize; __be32 dwAddress; __be16 wReserved; } 
__attribute__ ((packed)); #define E4_BLOCK_INFO_SIZE 14 #define UEA_BIHDR 0xabcd #define UEA_RESERVED 0xffff /* constants describing cmv type */ #define E1_PREAMBLE 0x535c #define E1_MODEMTOHOST 0x01 #define E1_HOSTTOMODEM 0x10 #define E1_MEMACCESS 0x1 #define E1_ADSLDIRECTIVE 0x7 #define E1_FUNCTION_TYPE(f) ((f) >> 4) #define E1_FUNCTION_SUBTYPE(f) ((f) & 0x0f) #define E4_MEMACCESS 0 #define E4_ADSLDIRECTIVE 0xf #define E4_FUNCTION_TYPE(f) ((f) >> 8) #define E4_FUNCTION_SIZE(f) ((f) & 0x0f) #define E4_FUNCTION_SUBTYPE(f) (((f) >> 4) & 0x0f) /* for MEMACCESS */ #define E1_REQUESTREAD 0x0 #define E1_REQUESTWRITE 0x1 #define E1_REPLYREAD 0x2 #define E1_REPLYWRITE 0x3 #define E4_REQUESTREAD 0x0 #define E4_REQUESTWRITE 0x4 #define E4_REPLYREAD (E4_REQUESTREAD | 1) #define E4_REPLYWRITE (E4_REQUESTWRITE | 1) /* for ADSLDIRECTIVE */ #define E1_KERNELREADY 0x0 #define E1_MODEMREADY 0x1 #define E4_KERNELREADY 0x0 #define E4_MODEMREADY 0x1 #define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf)) #define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | \ ((st) & 0xf) << 4 | ((s) & 0xf)) #define E1_MAKESA(a, b, c, d) \ (((c) & 0xff) << 24 | \ ((d) & 0xff) << 16 | \ ((a) & 0xff) << 8 | \ ((b) & 0xff)) #define E1_GETSA1(a) ((a >> 8) & 0xff) #define E1_GETSA2(a) (a & 0xff) #define E1_GETSA3(a) ((a >> 24) & 0xff) #define E1_GETSA4(a) ((a >> 16) & 0xff) #define E1_SA_CNTL E1_MAKESA('C', 'N', 'T', 'L') #define E1_SA_DIAG E1_MAKESA('D', 'I', 'A', 'G') #define E1_SA_INFO E1_MAKESA('I', 'N', 'F', 'O') #define E1_SA_OPTN E1_MAKESA('O', 'P', 'T', 'N') #define E1_SA_RATE E1_MAKESA('R', 'A', 'T', 'E') #define E1_SA_STAT E1_MAKESA('S', 'T', 'A', 'T') #define E4_SA_CNTL 1 #define E4_SA_STAT 2 #define E4_SA_INFO 3 #define E4_SA_TEST 4 #define E4_SA_OPTN 5 #define E4_SA_RATE 6 #define E4_SA_DIAG 7 #define E4_SA_CNFG 8 /* structures representing a CMV (Configuration and Management Variable) */ struct cmv_e1 { __le16 wPreamble; __u8 bDirection; __u8 bFunction; __le16 wIndex; __le32 
dwSymbolicAddress; __le16 wOffsetAddress; __le32 dwData; } __attribute__ ((packed)); struct cmv_e4 { __be16 wGroup; __be16 wFunction; __be16 wOffset; __be16 wAddress; __be32 dwData[6]; } __attribute__ ((packed)); /* structures representing swap information */ struct swap_info_e1 { __u8 bSwapPageNo; __u8 bOvl; /* overlay */ } __attribute__ ((packed)); struct swap_info_e4 { __u8 bSwapPageNo; } __attribute__ ((packed)); /* structures representing interrupt data */ #define e1_bSwapPageNo u.e1.s1.swapinfo.bSwapPageNo #define e1_bOvl u.e1.s1.swapinfo.bOvl #define e4_bSwapPageNo u.e4.s1.swapinfo.bSwapPageNo #define INT_LOADSWAPPAGE 0x0001 #define INT_INCOMINGCMV 0x0002 union intr_data_e1 { struct { struct swap_info_e1 swapinfo; __le16 wDataSize; } __attribute__ ((packed)) s1; struct { struct cmv_e1 cmv; __le16 wDataSize; } __attribute__ ((packed)) s2; } __attribute__ ((packed)); union intr_data_e4 { struct { struct swap_info_e4 swapinfo; __le16 wDataSize; } __attribute__ ((packed)) s1; struct { struct cmv_e4 cmv; __le16 wDataSize; } __attribute__ ((packed)) s2; } __attribute__ ((packed)); struct intr_pkt { __u8 bType; __u8 bNotification; __le16 wValue; __le16 wIndex; __le16 wLength; __le16 wInterrupt; union { union intr_data_e1 e1; union intr_data_e4 e4; } u; } __attribute__ ((packed)); #define E1_INTR_PKT_SIZE 28 #define E4_INTR_PKT_SIZE 64 static struct usb_driver uea_driver; static DEFINE_MUTEX(uea_mutex); static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"}; static int modem_index; static unsigned int debug; static unsigned int altsetting[NB_MODEM] = { [0 ... 
(NB_MODEM - 1)] = FASTEST_ISO_INTF}; static int sync_wait[NB_MODEM]; static char *cmv_file[NB_MODEM]; static int annex[NB_MODEM]; module_param(debug, uint, 0644); MODULE_PARM_DESC(debug, "module debug level (0=off,1=on,2=verbose)"); module_param_array(altsetting, uint, NULL, 0644); MODULE_PARM_DESC(altsetting, "alternate setting for incoming traffic: 0=bulk, " "1=isoc slowest, ... , 8=isoc fastest (default)"); module_param_array(sync_wait, bool, NULL, 0644); MODULE_PARM_DESC(sync_wait, "wait the synchronisation before starting ATM"); module_param_array(cmv_file, charp, NULL, 0644); MODULE_PARM_DESC(cmv_file, "file name with configuration and management variables"); module_param_array(annex, uint, NULL, 0644); MODULE_PARM_DESC(annex, "manually set annex a/b (0=auto, 1=annex a, 2=annex b)"); #define uea_wait(sc, cond, timeo) \ ({ \ int _r = wait_event_interruptible_timeout(sc->sync_q, \ (cond) || kthread_should_stop(), timeo); \ if (kthread_should_stop()) \ _r = -ENODEV; \ _r; \ }) #define UPDATE_ATM_STAT(type, val) \ do { \ if (sc->usbatm->atm_dev) \ sc->usbatm->atm_dev->type = val; \ } while (0) #define UPDATE_ATM_SIGNAL(val) \ do { \ if (sc->usbatm->atm_dev) \ atm_dev_signal_change(sc->usbatm->atm_dev, val); \ } while (0) /* Firmware loading */ #define LOAD_INTERNAL 0xA0 #define F8051_USBCS 0x7f92 /** * uea_send_modem_cmd - Send a command for pre-firmware devices. */ static int uea_send_modem_cmd(struct usb_device *usb, u16 addr, u16 size, const u8 *buff) { int ret = -ENOMEM; u8 *xfer_buff; xfer_buff = kmemdup(buff, size, GFP_KERNEL); if (xfer_buff) { ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), LOAD_INTERNAL, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, addr, 0, xfer_buff, size, CTRL_TIMEOUT); kfree(xfer_buff); } if (ret < 0) return ret; return (ret == size) ? 
0 : -EIO; } static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *context) { struct usb_device *usb = context; const u8 *pfw; u8 value; u32 crc = 0; int ret, size; uea_enters(usb); if (!fw_entry) { uea_err(usb, "firmware is not available\n"); goto err; } pfw = fw_entry->data; size = fw_entry->size; if (size < 4) goto err_fw_corrupted; crc = get_unaligned_le32(pfw); pfw += 4; size -= 4; if (crc32_be(0, pfw, size) != crc) goto err_fw_corrupted; /* * Start to upload firmware : send reset */ value = 1; ret = uea_send_modem_cmd(usb, F8051_USBCS, sizeof(value), &value); if (ret < 0) { uea_err(usb, "modem reset failed with error %d\n", ret); goto err; } while (size > 3) { u8 len = FW_GET_BYTE(pfw); u16 add = get_unaligned_le16(pfw + 1); size -= len + 3; if (size < 0) goto err_fw_corrupted; ret = uea_send_modem_cmd(usb, add, len, pfw + 3); if (ret < 0) { uea_err(usb, "uploading firmware data failed " "with error %d\n", ret); goto err; } pfw += len + 3; } if (size != 0) goto err_fw_corrupted; /* * Tell the modem we finish : de-assert reset */ value = 0; ret = uea_send_modem_cmd(usb, F8051_USBCS, 1, &value); if (ret < 0) uea_err(usb, "modem de-assert failed with error %d\n", ret); else uea_info(usb, "firmware uploaded\n"); goto err; err_fw_corrupted: uea_err(usb, "firmware is corrupted\n"); err: release_firmware(fw_entry); uea_leaves(usb); } /** * uea_load_firmware - Load usb firmware for pre-firmware devices. 
*/ static int uea_load_firmware(struct usb_device *usb, unsigned int ver) { int ret; char *fw_name = FW_DIR "eagle.fw"; uea_enters(usb); uea_info(usb, "pre-firmware device, uploading firmware\n"); switch (ver) { case ADI930: fw_name = FW_DIR "adi930.fw"; break; case EAGLE_I: fw_name = FW_DIR "eagleI.fw"; break; case EAGLE_II: fw_name = FW_DIR "eagleII.fw"; break; case EAGLE_III: fw_name = FW_DIR "eagleIII.fw"; break; case EAGLE_IV: fw_name = FW_DIR "eagleIV.fw"; break; } ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev, GFP_KERNEL, usb, uea_upload_pre_firmware); if (ret) uea_err(usb, "firmware %s is not available\n", fw_name); else uea_info(usb, "loading firmware %s\n", fw_name); uea_leaves(usb); return ret; } /* modem management : dsp firmware, send/read CMV, monitoring statistic */ /* * Make sure that the DSP code provided is safe to use. */ static int check_dsp_e1(const u8 *dsp, unsigned int len) { u8 pagecount, blockcount; u16 blocksize; u32 pageoffset; unsigned int i, j, p, pp; pagecount = FW_GET_BYTE(dsp); p = 1; /* enough space for page offsets? */ if (p + 4 * pagecount > len) return 1; for (i = 0; i < pagecount; i++) { pageoffset = get_unaligned_le32(dsp + p); p += 4; if (pageoffset == 0) continue; /* enough space for blockcount? */ if (pageoffset >= len) return 1; pp = pageoffset; blockcount = FW_GET_BYTE(dsp + pp); pp += 1; for (j = 0; j < blockcount; j++) { /* enough space for block header? */ if (pp + 4 > len) return 1; pp += 2; /* skip blockaddr */ blocksize = get_unaligned_le16(dsp + pp); pp += 2; /* enough space for block data? 
*/ if (pp + blocksize > len) return 1; pp += blocksize; } } return 0; } static int check_dsp_e4(const u8 *dsp, int len) { int i; struct l1_code *p = (struct l1_code *) dsp; unsigned int sum = p->code - dsp; if (len < sum) return 1; if (strcmp("STRATIPHY ANEXA", p->string_header) != 0 && strcmp("STRATIPHY ANEXB", p->string_header) != 0) return 1; for (i = 0; i < E4_MAX_PAGE_NUMBER; i++) { struct block_index *blockidx; u8 blockno = p->page_number_to_block_index[i]; if (blockno >= E4_NO_SWAPPAGE_HEADERS) continue; do { u64 l; if (blockno >= E4_NO_SWAPPAGE_HEADERS) return 1; blockidx = &p->page_header[blockno++]; if ((u8 *)(blockidx + 1) - dsp >= len) return 1; if (le16_to_cpu(blockidx->PageNumber) != i) return 1; l = E4_PAGE_BYTES(blockidx->PageSize); sum += l; l += le32_to_cpu(blockidx->PageOffset); if (l > len) return 1; /* zero is zero regardless endianes */ } while (blockidx->NotLastBlock); } return (sum == len) ? 0 : 1; } /* * send data to the idma pipe * */ static int uea_idma_write(struct uea_softc *sc, const void *data, u32 size) { int ret = -ENOMEM; u8 *xfer_buff; int bytes_read; xfer_buff = kmemdup(data, size, GFP_KERNEL); if (!xfer_buff) { uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n"); return ret; } ret = usb_bulk_msg(sc->usb_dev, usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE), xfer_buff, size, &bytes_read, BULK_TIMEOUT); kfree(xfer_buff); if (ret < 0) return ret; if (size != bytes_read) { uea_err(INS_TO_USBDEV(sc), "size != bytes_read %d %d\n", size, bytes_read); return -EIO; } return 0; } static int request_dsp(struct uea_softc *sc) { int ret; char *dsp_name; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { if (IS_ISDN(sc)) dsp_name = FW_DIR "DSP4i.bin"; else dsp_name = FW_DIR "DSP4p.bin"; } else if (UEA_CHIP_VERSION(sc) == ADI930) { if (IS_ISDN(sc)) dsp_name = FW_DIR "DSP9i.bin"; else dsp_name = FW_DIR "DSP9p.bin"; } else { if (IS_ISDN(sc)) dsp_name = FW_DIR "DSPei.bin"; else dsp_name = FW_DIR "DSPep.bin"; } ret = request_firmware(&sc->dsp_firm, dsp_name, 
&sc->usb_dev->dev); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "requesting firmware %s failed with error %d\n", dsp_name, ret); return ret; } if (UEA_CHIP_VERSION(sc) == EAGLE_IV) ret = check_dsp_e4(sc->dsp_firm->data, sc->dsp_firm->size); else ret = check_dsp_e1(sc->dsp_firm->data, sc->dsp_firm->size); if (ret) { uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", dsp_name); release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; return -EILSEQ; } return 0; } /* * The uea_load_page() function must be called within a process context */ static void uea_load_page_e1(struct work_struct *work) { struct uea_softc *sc = container_of(work, struct uea_softc, task); u16 pageno = sc->pageno; u16 ovl = sc->ovl; struct block_info_e1 bi; const u8 *p; u8 pagecount, blockcount; u16 blockaddr, blocksize; u32 pageoffset; int i; /* reload firmware when reboot start and it's loaded already */ if (ovl == 0 && pageno == 0 && sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } if (sc->dsp_firm == NULL && request_dsp(sc) < 0) return; p = sc->dsp_firm->data; pagecount = FW_GET_BYTE(p); p += 1; if (pageno >= pagecount) goto bad1; p += 4 * pageno; pageoffset = get_unaligned_le32(p); if (pageoffset == 0) goto bad1; p = sc->dsp_firm->data + pageoffset; blockcount = FW_GET_BYTE(p); p += 1; uea_dbg(INS_TO_USBDEV(sc), "sending %u blocks for DSP page %u\n", blockcount, pageno); bi.wHdr = cpu_to_le16(UEA_BIHDR); bi.wOvl = cpu_to_le16(ovl); bi.wOvlOffset = cpu_to_le16(ovl | 0x8000); for (i = 0; i < blockcount; i++) { blockaddr = get_unaligned_le16(p); p += 2; blocksize = get_unaligned_le16(p); p += 2; bi.wSize = cpu_to_le16(blocksize); bi.wAddress = cpu_to_le16(blockaddr); bi.wLast = cpu_to_le16((i == blockcount - 1) ? 
1 : 0); /* send block info through the IDMA pipe */ if (uea_idma_write(sc, &bi, E1_BLOCK_INFO_SIZE)) goto bad2; /* send block data through the IDMA pipe */ if (uea_idma_write(sc, p, blocksize)) goto bad2; p += blocksize; } return; bad2: uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", i); return; bad1: uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno); } static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot) { struct block_info_e4 bi; struct block_index *blockidx; struct l1_code *p = (struct l1_code *) sc->dsp_firm->data; u8 blockno = p->page_number_to_block_index[pageno]; bi.wHdr = cpu_to_be16(UEA_BIHDR); bi.bBootPage = boot; bi.bPageNumber = pageno; bi.wReserved = cpu_to_be16(UEA_RESERVED); do { const u8 *blockoffset; unsigned int blocksize; blockidx = &p->page_header[blockno]; blocksize = E4_PAGE_BYTES(blockidx->PageSize); blockoffset = sc->dsp_firm->data + le32_to_cpu( blockidx->PageOffset); bi.dwSize = cpu_to_be32(blocksize); bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress)); uea_dbg(INS_TO_USBDEV(sc), "sending block %u for DSP page " "%u size %u address %x\n", blockno, pageno, blocksize, le32_to_cpu(blockidx->PageAddress)); /* send block info through the IDMA pipe */ if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE)) goto bad; /* send block data through the IDMA pipe */ if (uea_idma_write(sc, blockoffset, blocksize)) goto bad; blockno++; } while (blockidx->NotLastBlock); return; bad: uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", blockno); return; } static void uea_load_page_e4(struct work_struct *work) { struct uea_softc *sc = container_of(work, struct uea_softc, task); u8 pageno = sc->pageno; int i; struct block_info_e4 bi; struct l1_code *p; uea_dbg(INS_TO_USBDEV(sc), "sending DSP page %u\n", pageno); /* reload firmware when reboot start and it's loaded already */ if (pageno == 0 && sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } if (sc->dsp_firm == NULL && 
request_dsp(sc) < 0) return; p = (struct l1_code *) sc->dsp_firm->data; if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) { uea_err(INS_TO_USBDEV(sc), "invalid DSP " "page %u requested\n", pageno); return; } if (pageno != 0) { __uea_load_page_e4(sc, pageno, 0); return; } uea_dbg(INS_TO_USBDEV(sc), "sending Main DSP page %u\n", p->page_header[0].PageNumber); for (i = 0; i < le16_to_cpu(p->page_header[0].PageNumber); i++) { if (E4_IS_BOOT_PAGE(p->page_header[i].PageSize)) __uea_load_page_e4(sc, i, 1); } uea_dbg(INS_TO_USBDEV(sc) , "sending start bi\n"); bi.wHdr = cpu_to_be16(UEA_BIHDR); bi.bBootPage = 0; bi.bPageNumber = 0xff; bi.wReserved = cpu_to_be16(UEA_RESERVED); bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize)); bi.dwAddress = cpu_to_be32(le32_to_cpu(p->page_header[0].PageAddress)); /* send block info through the IDMA pipe */ if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE)) uea_err(INS_TO_USBDEV(sc), "sending DSP start bi failed\n"); } static inline void wake_up_cmv_ack(struct uea_softc *sc) { BUG_ON(sc->cmv_ack); sc->cmv_ack = 1; wake_up(&sc->sync_q); } static inline int wait_cmv_ack(struct uea_softc *sc) { int ret = uea_wait(sc, sc->cmv_ack , ACK_TIMEOUT); sc->cmv_ack = 0; uea_dbg(INS_TO_USBDEV(sc), "wait_event_timeout : %d ms\n", jiffies_to_msecs(ret)); if (ret < 0) return ret; return (ret == 0) ? 
-ETIMEDOUT : 0; } #define UCDC_SEND_ENCAPSULATED_COMMAND 0x00 static int uea_request(struct uea_softc *sc, u16 value, u16 index, u16 size, const void *data) { u8 *xfer_buff; int ret = -ENOMEM; xfer_buff = kmemdup(data, size, GFP_KERNEL); if (!xfer_buff) { uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n"); return ret; } ret = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0), UCDC_SEND_ENCAPSULATED_COMMAND, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, xfer_buff, size, CTRL_TIMEOUT); kfree(xfer_buff); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "usb_control_msg error %d\n", ret); return ret; } if (ret != size) { uea_err(INS_TO_USBDEV(sc), "usb_control_msg send only %d bytes (instead of %d)\n", ret, size); return -EIO; } return 0; } static int uea_cmv_e1(struct uea_softc *sc, u8 function, u32 address, u16 offset, u32 data) { struct cmv_e1 cmv; int ret; uea_enters(INS_TO_USBDEV(sc)); uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, " "offset : 0x%04x, data : 0x%08x\n", E1_FUNCTION_TYPE(function), E1_FUNCTION_SUBTYPE(function), E1_GETSA1(address), E1_GETSA2(address), E1_GETSA3(address), E1_GETSA4(address), offset, data); /* we send a request, but we expect a reply */ sc->cmv_dsc.e1.function = function | 0x2; sc->cmv_dsc.e1.idx++; sc->cmv_dsc.e1.address = address; sc->cmv_dsc.e1.offset = offset; cmv.wPreamble = cpu_to_le16(E1_PREAMBLE); cmv.bDirection = E1_HOSTTOMODEM; cmv.bFunction = function; cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx); put_unaligned_le32(address, &cmv.dwSymbolicAddress); cmv.wOffsetAddress = cpu_to_le16(offset); put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData); ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); if (ret < 0) return ret; ret = wait_cmv_ack(sc); uea_leaves(INS_TO_USBDEV(sc)); return ret; } static int uea_cmv_e4(struct uea_softc *sc, u16 function, u16 group, u16 address, u16 offset, u32 data) { struct cmv_e4 cmv; int ret; uea_enters(INS_TO_USBDEV(sc)); 
memset(&cmv, 0, sizeof(cmv)); uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Group : 0x%04x, " "Address : 0x%04x, offset : 0x%04x, data : 0x%08x\n", E4_FUNCTION_TYPE(function), E4_FUNCTION_SUBTYPE(function), group, address, offset, data); /* we send a request, but we expect a reply */ sc->cmv_dsc.e4.function = function | (0x1 << 4); sc->cmv_dsc.e4.offset = offset; sc->cmv_dsc.e4.address = address; sc->cmv_dsc.e4.group = group; cmv.wFunction = cpu_to_be16(function); cmv.wGroup = cpu_to_be16(group); cmv.wAddress = cpu_to_be16(address); cmv.wOffset = cpu_to_be16(offset); cmv.dwData[0] = cpu_to_be32(data); ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); if (ret < 0) return ret; ret = wait_cmv_ack(sc); uea_leaves(INS_TO_USBDEV(sc)); return ret; } static inline int uea_read_cmv_e1(struct uea_softc *sc, u32 address, u16 offset, u32 *data) { int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTREAD), address, offset, 0); if (ret < 0) uea_err(INS_TO_USBDEV(sc), "reading cmv failed with error %d\n", ret); else *data = sc->data; return ret; } static inline int uea_read_cmv_e4(struct uea_softc *sc, u8 size, u16 group, u16 address, u16 offset, u32 *data) { int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTREAD, size), group, address, offset, 0); if (ret < 0) uea_err(INS_TO_USBDEV(sc), "reading cmv failed with error %d\n", ret); else { *data = sc->data; /* size is in 16-bit word quantities */ if (size > 2) *(data + 1) = sc->data1; } return ret; } static inline int uea_write_cmv_e1(struct uea_softc *sc, u32 address, u16 offset, u32 data) { int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTWRITE), address, offset, data); if (ret < 0) uea_err(INS_TO_USBDEV(sc), "writing cmv failed with error %d\n", ret); return ret; } static inline int uea_write_cmv_e4(struct uea_softc *sc, u8 size, u16 group, u16 address, u16 offset, u32 data) { int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS, E4_REQUESTWRITE, size), 
group, address, offset, data); if (ret < 0) uea_err(INS_TO_USBDEV(sc), "writing cmv failed with error %d\n", ret); return ret; } static void uea_set_bulk_timeout(struct uea_softc *sc, u32 dsrate) { int ret; u16 timeout; /* in bulk mode the modem have problem with high rate * changing internal timing could improve things, but the * value is mysterious. * ADI930 don't support it (-EPIPE error). */ if (UEA_CHIP_VERSION(sc) == ADI930 || altsetting[sc->modem_index] > 0 || sc->stats.phy.dsrate == dsrate) return; /* Original timming (1Mbit/s) from ADI (used in windows driver) */ timeout = (dsrate <= 1024*1024) ? 0 : 1; ret = uea_request(sc, UEA_SET_TIMEOUT, timeout, 0, NULL); uea_info(INS_TO_USBDEV(sc), "setting new timeout %d%s\n", timeout, ret < 0 ? " failed" : ""); } /* * Monitor the modem and update the stat * return 0 if everything is ok * return < 0 if an error occurs (-EAGAIN reboot needed) */ static int uea_stat_e1(struct uea_softc *sc) { u32 data; int ret; uea_enters(INS_TO_USBDEV(sc)); data = sc->stats.phy.state; ret = uea_read_cmv_e1(sc, E1_SA_STAT, 0, &sc->stats.phy.state); if (ret < 0) return ret; switch (GET_STATUS(sc->stats.phy.state)) { case 0: /* not yet synchronized */ uea_dbg(INS_TO_USBDEV(sc), "modem not yet synchronized\n"); return 0; case 1: /* initialization */ uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n"); return 0; case 2: /* operational */ uea_vdbg(INS_TO_USBDEV(sc), "modem operational\n"); break; case 3: /* fail ... */ uea_info(INS_TO_USBDEV(sc), "modem synchronization failed" " (may be try other cmv/dsp)\n"); return -EAGAIN; case 4 ... 6: /* test state */ uea_warn(INS_TO_USBDEV(sc), "modem in test mode - not supported\n"); return -EAGAIN; case 7: /* fast-retain ... 
*/ uea_info(INS_TO_USBDEV(sc), "modem in fast-retain mode\n"); return 0; default: uea_err(INS_TO_USBDEV(sc), "modem invalid SW mode %d\n", GET_STATUS(sc->stats.phy.state)); return -EAGAIN; } if (GET_STATUS(data) != 2) { uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL); uea_info(INS_TO_USBDEV(sc), "modem operational\n"); /* release the dsp firmware as it is not needed until * the next failure */ if (sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } } /* always update it as atm layer could not be init when we switch to * operational state */ UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND); /* wake up processes waiting for synchronization */ wake_up(&sc->sync_q); ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 2, &sc->stats.phy.flags); if (ret < 0) return ret; sc->stats.phy.mflags |= sc->stats.phy.flags; /* in case of a flags ( for example delineation LOSS (& 0x10)), * we check the status again in order to detect the failure earlier */ if (sc->stats.phy.flags) { uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n", sc->stats.phy.flags); return 0; } ret = uea_read_cmv_e1(sc, E1_SA_RATE, 0, &data); if (ret < 0) return ret; uea_set_bulk_timeout(sc, (data >> 16) * 32); sc->stats.phy.dsrate = (data >> 16) * 32; sc->stats.phy.usrate = (data & 0xffff) * 32; UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424); ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 23, &data); if (ret < 0) return ret; sc->stats.phy.dsattenuation = (data & 0xff) / 2; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 47, &data); if (ret < 0) return ret; sc->stats.phy.usattenuation = (data & 0xff) / 2; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 25, &sc->stats.phy.dsmargin); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 49, &sc->stats.phy.usmargin); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 51, &sc->stats.phy.rxflow); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 52, &sc->stats.phy.txflow); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 54, 
&sc->stats.phy.dsunc); if (ret < 0) return ret; /* only for atu-c */ ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 58, &sc->stats.phy.usunc); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 53, &sc->stats.phy.dscorr); if (ret < 0) return ret; /* only for atu-c */ ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 57, &sc->stats.phy.uscorr); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_INFO, 8, &sc->stats.phy.vidco); if (ret < 0) return ret; ret = uea_read_cmv_e1(sc, E1_SA_INFO, 13, &sc->stats.phy.vidcpe); if (ret < 0) return ret; return 0; } static int uea_stat_e4(struct uea_softc *sc) { u32 data; u32 tmp_arr[2]; int ret; uea_enters(INS_TO_USBDEV(sc)); data = sc->stats.phy.state; /* XXX only need to be done before operationnal... */ ret = uea_read_cmv_e4(sc, 1, E4_SA_STAT, 0, 0, &sc->stats.phy.state); if (ret < 0) return ret; switch (sc->stats.phy.state) { case 0x0: /* not yet synchronized */ case 0x1: case 0x3: case 0x4: uea_dbg(INS_TO_USBDEV(sc), "modem not yet " "synchronized\n"); return 0; case 0x5: /* initialization */ case 0x6: case 0x9: case 0xa: uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n"); return 0; case 0x2: /* fail ... 
*/ uea_info(INS_TO_USBDEV(sc), "modem synchronization " "failed (may be try other cmv/dsp)\n"); return -EAGAIN; case 0x7: /* operational */ break; default: uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n", sc->stats.phy.state); return 0; } if (data != 7) { uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL); uea_info(INS_TO_USBDEV(sc), "modem operational\n"); /* release the dsp firmware as it is not needed until * the next failure */ if (sc->dsp_firm) { release_firmware(sc->dsp_firm); sc->dsp_firm = NULL; } } /* always update it as atm layer could not be init when we switch to * operational state */ UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND); /* wake up processes waiting for synchronization */ wake_up(&sc->sync_q); /* TODO improve this state machine : * we need some CMV info : what they do and their unit * we should find the equivalent of eagle3- CMV */ /* check flags */ ret = uea_read_cmv_e4(sc, 1, E4_SA_DIAG, 0, 0, &sc->stats.phy.flags); if (ret < 0) return ret; sc->stats.phy.mflags |= sc->stats.phy.flags; /* in case of a flags ( for example delineation LOSS (& 0x10)), * we check the status again in order to detect the failure earlier */ if (sc->stats.phy.flags) { uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n", sc->stats.phy.flags); if (sc->stats.phy.flags & 1) /* delineation LOSS */ return -EAGAIN; if (sc->stats.phy.flags & 0x4000) /* Reset Flag */ return -EAGAIN; return 0; } /* rate data may be in upper or lower half of 64 bit word, strange */ ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 0, 0, tmp_arr); if (ret < 0) return ret; data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1]; sc->stats.phy.usrate = data / 1000; ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 1, 0, tmp_arr); if (ret < 0) return ret; data = (tmp_arr[0]) ? 
tmp_arr[0] : tmp_arr[1]; uea_set_bulk_timeout(sc, data / 1000); sc->stats.phy.dsrate = data / 1000; UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424); ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 1, &data); if (ret < 0) return ret; sc->stats.phy.dsattenuation = data / 10; ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 1, &data); if (ret < 0) return ret; sc->stats.phy.usattenuation = data / 10; ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 3, &data); if (ret < 0) return ret; sc->stats.phy.dsmargin = data / 2; ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 3, &data); if (ret < 0) return ret; sc->stats.phy.usmargin = data / 10; return 0; } static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver) { char file_arr[] = "CMVxy.bin"; char *file; kparam_block_sysfs_write(cmv_file); /* set proper name corresponding modem version and line type */ if (cmv_file[sc->modem_index] == NULL) { if (UEA_CHIP_VERSION(sc) == ADI930) file_arr[3] = '9'; else if (UEA_CHIP_VERSION(sc) == EAGLE_IV) file_arr[3] = '4'; else file_arr[3] = 'e'; file_arr[4] = IS_ISDN(sc) ? 
'i' : 'p'; file = file_arr; } else file = cmv_file[sc->modem_index]; strcpy(cmv_name, FW_DIR); strlcat(cmv_name, file, UEA_FW_NAME_MAX); if (ver == 2) strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX); kparam_unblock_sysfs_write(cmv_file); } static int request_cmvs_old(struct uea_softc *sc, void **cmvs, const struct firmware **fw) { int ret, size; u8 *data; char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */ cmvs_file_name(sc, cmv_name, 1); ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "requesting firmware %s failed with error %d\n", cmv_name, ret); return ret; } data = (u8 *) (*fw)->data; size = (*fw)->size; if (size < 1) goto err_fw_corrupted; if (size != *data * sizeof(struct uea_cmvs_v1) + 1) goto err_fw_corrupted; *cmvs = (void *)(data + 1); return *data; err_fw_corrupted: uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name); release_firmware(*fw); return -EILSEQ; } static int request_cmvs(struct uea_softc *sc, void **cmvs, const struct firmware **fw, int *ver) { int ret, size; u32 crc; u8 *data; char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */ cmvs_file_name(sc, cmv_name, 2); ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev); if (ret < 0) { /* if caller can handle old version, try to provide it */ if (*ver == 1) { uea_warn(INS_TO_USBDEV(sc), "requesting " "firmware %s failed, " "try to get older cmvs\n", cmv_name); return request_cmvs_old(sc, cmvs, fw); } uea_err(INS_TO_USBDEV(sc), "requesting firmware %s failed with error %d\n", cmv_name, ret); return ret; } size = (*fw)->size; data = (u8 *) (*fw)->data; if (size < 4 || strncmp(data, "cmv2", 4) != 0) { if (*ver == 1) { uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted," " try to get older cmvs\n", cmv_name); release_firmware(*fw); return request_cmvs_old(sc, cmvs, fw); } goto err_fw_corrupted; } *ver = 2; data += 4; size -= 4; if (size < 5) goto err_fw_corrupted; crc = get_unaligned_le32(data); data += 4; size -= 4; 
if (crc32_be(0, data, size) != crc) goto err_fw_corrupted; if (size != *data * sizeof(struct uea_cmvs_v2) + 1) goto err_fw_corrupted; *cmvs = (void *) (data + 1); return *data; err_fw_corrupted: uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name); release_firmware(*fw); return -EILSEQ; } static int uea_send_cmvs_e1(struct uea_softc *sc) { int i, ret, len; void *cmvs_ptr; const struct firmware *cmvs_fw; int ver = 1; /* we can handle v1 cmv firmware version; */ /* Enter in R-IDLE (cmv) until instructed otherwise */ ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1); if (ret < 0) return ret; /* Dump firmware version */ ret = uea_read_cmv_e1(sc, E1_SA_INFO, 10, &sc->stats.phy.firmid); if (ret < 0) return ret; uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n", sc->stats.phy.firmid); /* get options */ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver); if (ret < 0) return ret; /* send options */ if (ver == 1) { struct uea_cmvs_v1 *cmvs_v1 = cmvs_ptr; uea_warn(INS_TO_USBDEV(sc), "use deprecated cmvs version, " "please update your firmware\n"); for (i = 0; i < len; i++) { ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address), get_unaligned_le16(&cmvs_v1[i].offset), get_unaligned_le32(&cmvs_v1[i].data)); if (ret < 0) goto out; } } else if (ver == 2) { struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; for (i = 0; i < len; i++) { ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address), (u16) get_unaligned_le32(&cmvs_v2[i].offset), get_unaligned_le32(&cmvs_v2[i].data)); if (ret < 0) goto out; } } else { /* This really should not happen */ uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver); goto out; } /* Enter in R-ACT-REQ */ ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2); uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n"); uea_info(INS_TO_USBDEV(sc), "modem started, waiting " "synchronization...\n"); out: release_firmware(cmvs_fw); return ret; } static int uea_send_cmvs_e4(struct uea_softc *sc) { int i, ret, len; void 
*cmvs_ptr; const struct firmware *cmvs_fw; int ver = 2; /* we can only handle v2 cmv firmware version; */ /* Enter in R-IDLE (cmv) until instructed otherwise */ ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1); if (ret < 0) return ret; /* Dump firmware version */ /* XXX don't read the 3th byte as it is always 6 */ ret = uea_read_cmv_e4(sc, 2, E4_SA_INFO, 55, 0, &sc->stats.phy.firmid); if (ret < 0) return ret; uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n", sc->stats.phy.firmid); /* get options */ ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver); if (ret < 0) return ret; /* send options */ if (ver == 2) { struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; for (i = 0; i < len; i++) { ret = uea_write_cmv_e4(sc, 1, get_unaligned_le32(&cmvs_v2[i].group), get_unaligned_le32(&cmvs_v2[i].address), get_unaligned_le32(&cmvs_v2[i].offset), get_unaligned_le32(&cmvs_v2[i].data)); if (ret < 0) goto out; } } else { /* This really should not happen */ uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver); goto out; } /* Enter in R-ACT-REQ */ ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2); uea_vdbg(INS_TO_USBDEV(sc), "Entering in R-ACT-REQ state\n"); uea_info(INS_TO_USBDEV(sc), "modem started, waiting " "synchronization...\n"); out: release_firmware(cmvs_fw); return ret; } /* Start boot post firmware modem: * - send reset commands through usb control pipe * - start workqueue for DSP loading * - send CMV options to modem */ static int uea_start_reset(struct uea_softc *sc) { u16 zero = 0; /* ;-) */ int ret; uea_enters(INS_TO_USBDEV(sc)); uea_info(INS_TO_USBDEV(sc), "(re)booting started\n"); /* mask interrupt */ sc->booting = 1; /* We need to set this here because, a ack timeout could have occurred, * but before we start the reboot, the ack occurs and set this to 1. * So we will failed to wait Ready CMV. 
*/ sc->cmv_ack = 0; UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST); /* reset statistics */ memset(&sc->stats, 0, sizeof(struct uea_stats)); /* tell the modem that we want to boot in IDMA mode */ uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL); uea_request(sc, UEA_SET_MODE, UEA_BOOT_IDMA, 0, NULL); /* enter reset mode */ uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL); /* original driver use 200ms, but windows driver use 100ms */ ret = uea_wait(sc, 0, msecs_to_jiffies(100)); if (ret < 0) return ret; /* leave reset mode */ uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL); if (UEA_CHIP_VERSION(sc) != EAGLE_IV) { /* clear tx and rx mailboxes */ uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero); uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero); uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero); } ret = uea_wait(sc, 0, msecs_to_jiffies(1000)); if (ret < 0) return ret; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE, E4_MODEMREADY, 1); else sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE, E1_MODEMREADY); /* demask interrupt */ sc->booting = 0; /* start loading DSP */ sc->pageno = 0; sc->ovl = 0; schedule_work(&sc->task); /* wait for modem ready CMV */ ret = wait_cmv_ack(sc); if (ret < 0) return ret; uea_vdbg(INS_TO_USBDEV(sc), "Ready CMV received\n"); ret = sc->send_cmvs(sc); if (ret < 0) return ret; sc->reset = 0; uea_leaves(INS_TO_USBDEV(sc)); return ret; } /* * In case of an error wait 1s before rebooting the modem * if the modem don't request reboot (-EAGAIN). * Monitor the modem every 1s. 
*/
/* Worker kthread: reboots the modem when needed, then polls its status
 * once per second until the driver asks the thread to stop. */
static int uea_kthread(void *data)
{
	struct uea_softc *sc = data;
	/* start in the "reboot needed" state so the first iteration
	 * performs the initial modem reset */
	int ret = -EAGAIN;

	set_freezable();
	uea_enters(INS_TO_USBDEV(sc));
	while (!kthread_should_stop()) {
		/* reboot on a previous error or on explicit request */
		if (ret < 0 || sc->reset)
			ret = uea_start_reset(sc);
		/* monitor the modem; may return -EAGAIN to ask a reboot */
		if (!ret)
			ret = sc->stat(sc);
		/* -EAGAIN means reboot immediately, otherwise sleep 1s */
		if (ret != -EAGAIN)
			uea_wait(sc, 0, msecs_to_jiffies(1000));
		try_to_freeze();
	}
	uea_leaves(INS_TO_USBDEV(sc));
	return ret;
}

/* Load second usb firmware for ADI930 chip */
static int load_XILINX_firmware(struct uea_softc *sc)
{
	const struct firmware *fw_entry;
	int ret, size, u, ln;
	const u8 *pfw;
	u8 value;
	char *fw_name = FW_DIR "930-fpga.bin";

	uea_enters(INS_TO_USBDEV(sc));
	ret = request_firmware(&fw_entry, fw_name, &sc->usb_dev->dev);
	if (ret) {
		uea_err(INS_TO_USBDEV(sc), "firmware %s is not available\n",
			fw_name);
		goto err0;
	}

	pfw = fw_entry->data;
	size = fw_entry->size;
	/* the FPGA bitstream has a fixed size; anything else is corrupt */
	if (size != 0x577B) {
		uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
			fw_name);
		ret = -EILSEQ;
		goto err1;
	}

	/* push the bitstream in 64-byte chunks over the control pipe */
	for (u = 0; u < size; u += ln) {
		ln = min(size - u, 64);
		ret = uea_request(sc, 0xe, 0, ln, pfw + u);
		if (ret < 0) {
			uea_err(INS_TO_USBDEV(sc),
				"elsa download data failed (%d)\n", ret);
			goto err1;
		}
	}

	/* finish to send the fpga */
	ret = uea_request(sc, 0xe, 1, 0, NULL);
	if (ret < 0) {
		uea_err(INS_TO_USBDEV(sc),
			"elsa download data failed (%d)\n", ret);
		goto err1;
	}

	/* Tell the modem we finish : de-assert reset */
	value = 0;
	ret = uea_send_modem_cmd(sc->usb_dev, 0xe, 1, &value);
	if (ret < 0)
		uea_err(sc->usb_dev, "elsa de-assert failed with error"
			" %d\n", ret);

err1:
	release_firmware(fw_entry);
err0:
	uea_leaves(INS_TO_USBDEV(sc));
	return ret;
}

/* The modem send us an ack. 
First we check that it is valid, then wake the waiter. */
static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
{
	struct cmv_dsc_e1 *dsc = &sc->cmv_dsc.e1;
	struct cmv_e1 *cmv = &intr->u.e1.s2.cmv;

	uea_enters(INS_TO_USBDEV(sc));
	/* sanity-check the preamble and the direction of the message */
	if (le16_to_cpu(cmv->wPreamble) != E1_PREAMBLE)
		goto bad1;

	if (cmv->bDirection != E1_MODEMTOHOST)
		goto bad1;

	/* FIXME : ADI930 reply wrong preambule (func = 2, sub = 2) to
	 * the first MEMACCESS cmv. Ignore it...
	 */
	if (cmv->bFunction != dsc->function) {
		if (UEA_CHIP_VERSION(sc) == ADI930
				&& cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
			/* patch the reply so it matches the pending request
			 * and falls through the MEMACCESS checks below */
			cmv->wIndex = cpu_to_le16(dsc->idx);
			put_unaligned_le32(dsc->address,
						&cmv->dwSymbolicAddress);
			cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
		} else
			goto bad2;
	}

	/* MODEMREADY directive: just release anyone waiting for the ack */
	if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
							E1_MODEMREADY)) {
		wake_up_cmv_ack(sc);
		uea_leaves(INS_TO_USBDEV(sc));
		return;
	}

	/* in case of MEMACCESS */
	if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
	    get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
	    le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
		goto bad2;

	/* the device sends the two 16 bit halves swapped; undo that */
	sc->data = get_unaligned_le32(&cmv->dwData);
	sc->data = sc->data << 16 | sc->data >> 16;

	wake_up_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad2:
	uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
			"Function : %d, Subfunction : %d\n",
			E1_FUNCTION_TYPE(cmv->bFunction),
			E1_FUNCTION_SUBTYPE(cmv->bFunction));
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad1:
	uea_err(INS_TO_USBDEV(sc), "invalid cmv received, "
			"wPreamble %d, bDirection %d\n",
			le16_to_cpu(cmv->wPreamble), cmv->bDirection);
	uea_leaves(INS_TO_USBDEV(sc));
}

/* The modem send us an ack. 
First we check that it is valid, then wake the waiter. */
static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
{
	struct cmv_dsc_e4 *dsc = &sc->cmv_dsc.e4;
	struct cmv_e4 *cmv = &intr->u.e4.s2.cmv;

	uea_enters(INS_TO_USBDEV(sc));
	uea_dbg(INS_TO_USBDEV(sc), "cmv %x %x %x %x %x %x\n",
		be16_to_cpu(cmv->wGroup), be16_to_cpu(cmv->wFunction),
		be16_to_cpu(cmv->wOffset), be16_to_cpu(cmv->wAddress),
		be32_to_cpu(cmv->dwData[0]), be32_to_cpu(cmv->dwData[1]));

	/* drop any reply that is not for the request we are waiting on */
	if (be16_to_cpu(cmv->wFunction) != dsc->function)
		goto bad2;

	/* MODEMREADY directive: just release anyone waiting for the ack */
	if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
						E4_MODEMREADY, 1)) {
		wake_up_cmv_ack(sc);
		uea_leaves(INS_TO_USBDEV(sc));
		return;
	}

	/* in case of MEMACCESS */
	if (be16_to_cpu(cmv->wOffset) != dsc->offset ||
	    be16_to_cpu(cmv->wGroup) != dsc->group ||
	    be16_to_cpu(cmv->wAddress) != dsc->address)
		goto bad2;

	/* eagle IV replies carry two big-endian 32 bit data words */
	sc->data = be32_to_cpu(cmv->dwData[0]);
	sc->data1 = be32_to_cpu(cmv->dwData[1]);
	wake_up_cmv_ack(sc);
	uea_leaves(INS_TO_USBDEV(sc));
	return;

bad2:
	uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
			"Function : %d, Subfunction : %d\n",
			E4_FUNCTION_TYPE(cmv->wFunction),
			E4_FUNCTION_SUBTYPE(cmv->wFunction));
	uea_leaves(INS_TO_USBDEV(sc));
	return;
}

/* queue a DSP page swap requested by an E1 modem interrupt */
static void uea_schedule_load_page_e1(struct uea_softc *sc,
						struct intr_pkt *intr)
{
	sc->pageno = intr->e1_bSwapPageNo;
	/* swap the two overlay nibbles */
	sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
	schedule_work(&sc->task);
}

/* queue a DSP page swap requested by an E4 modem interrupt */
static void uea_schedule_load_page_e4(struct uea_softc *sc,
						struct intr_pkt *intr)
{
	sc->pageno = intr->e4_bSwapPageNo;
	schedule_work(&sc->task);
}

/*
 * interrupt handler
 */
static void uea_intr(struct urb *urb)
{
	struct uea_softc *sc = urb->context;
	struct intr_pkt *intr = urb->transfer_buffer;
	int status = urb->status;

	uea_enters(INS_TO_USBDEV(sc));

	/* on URB error, give up without resubmitting */
	if (unlikely(status < 0)) {
		uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n",
			status);
		return;
	}

	/* device-to-host interrupt */
	if (intr->bType != 0x08 || sc->booting) {
		uea_err(INS_TO_USBDEV(sc), "wrong interrupt\n");
		goto resubmit;
	}

	switch 
(le16_to_cpu(intr->wInterrupt)) { case INT_LOADSWAPPAGE: sc->schedule_load_page(sc, intr); break; case INT_INCOMINGCMV: sc->dispatch_cmv(sc, intr); break; default: uea_err(INS_TO_USBDEV(sc), "unknown interrupt %u\n", le16_to_cpu(intr->wInterrupt)); } resubmit: usb_submit_urb(sc->urb_int, GFP_ATOMIC); } /* * Start the modem : init the data and start kernel thread */ static int uea_boot(struct uea_softc *sc) { int ret, size; struct intr_pkt *intr; uea_enters(INS_TO_USBDEV(sc)); if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { size = E4_INTR_PKT_SIZE; sc->dispatch_cmv = uea_dispatch_cmv_e4; sc->schedule_load_page = uea_schedule_load_page_e4; sc->stat = uea_stat_e4; sc->send_cmvs = uea_send_cmvs_e4; INIT_WORK(&sc->task, uea_load_page_e4); } else { size = E1_INTR_PKT_SIZE; sc->dispatch_cmv = uea_dispatch_cmv_e1; sc->schedule_load_page = uea_schedule_load_page_e1; sc->stat = uea_stat_e1; sc->send_cmvs = uea_send_cmvs_e1; INIT_WORK(&sc->task, uea_load_page_e1); } init_waitqueue_head(&sc->sync_q); if (UEA_CHIP_VERSION(sc) == ADI930) load_XILINX_firmware(sc); intr = kmalloc(size, GFP_KERNEL); if (!intr) { uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt package\n"); goto err0; } sc->urb_int = usb_alloc_urb(0, GFP_KERNEL); if (!sc->urb_int) { uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt URB\n"); goto err1; } usb_fill_int_urb(sc->urb_int, sc->usb_dev, usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE), intr, size, uea_intr, sc, sc->usb_dev->actconfig->interface[0]->altsetting[0]. endpoint[0].desc.bInterval); ret = usb_submit_urb(sc->urb_int, GFP_KERNEL); if (ret < 0) { uea_err(INS_TO_USBDEV(sc), "urb submition failed with error %d\n", ret); goto err1; } /* Create worker thread, but don't start it here. Start it after * all usbatm generic initialization is done. 
*/ sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm"); if (IS_ERR(sc->kthread)) { uea_err(INS_TO_USBDEV(sc), "failed to create thread\n"); goto err2; } uea_leaves(INS_TO_USBDEV(sc)); return 0; err2: usb_kill_urb(sc->urb_int); err1: usb_free_urb(sc->urb_int); sc->urb_int = NULL; kfree(intr); err0: uea_leaves(INS_TO_USBDEV(sc)); return -ENOMEM; } /* * Stop the modem : kill kernel thread and free data */ static void uea_stop(struct uea_softc *sc) { int ret; uea_enters(INS_TO_USBDEV(sc)); ret = kthread_stop(sc->kthread); uea_dbg(INS_TO_USBDEV(sc), "kthread finish with status %d\n", ret); uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL); usb_kill_urb(sc->urb_int); kfree(sc->urb_int->transfer_buffer); usb_free_urb(sc->urb_int); /* flush the work item, when no one can schedule it */ flush_work_sync(&sc->task); if (sc->dsp_firm) release_firmware(sc->dsp_firm); uea_leaves(INS_TO_USBDEV(sc)); } /* syfs interface */ static struct uea_softc *dev_to_uea(struct device *dev) { struct usb_interface *intf; struct usbatm_data *usbatm; intf = to_usb_interface(dev); if (!intf) return NULL; usbatm = usb_get_intfdata(intf); if (!usbatm) return NULL; return usbatm->driver_data; } static ssize_t read_status(struct device *dev, struct device_attribute *attr, char *buf) { int ret = -ENODEV; struct uea_softc *sc; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.state); out: mutex_unlock(&uea_mutex); return ret; } static ssize_t reboot(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret = -ENODEV; struct uea_softc *sc; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; sc->reset = 1; ret = count; out: mutex_unlock(&uea_mutex); return ret; } static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot); static ssize_t read_human_status(struct device *dev, struct device_attribute *attr, char *buf) { int ret = -ENODEV; int modem_state; struct uea_softc 
*sc; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { switch (sc->stats.phy.state) { case 0x0: /* not yet synchronized */ case 0x1: case 0x3: case 0x4: modem_state = 0; break; case 0x5: /* initialization */ case 0x6: case 0x9: case 0xa: modem_state = 1; break; case 0x7: /* operational */ modem_state = 2; break; case 0x2: /* fail ... */ modem_state = 3; break; default: /* unknown */ modem_state = 4; break; } } else modem_state = GET_STATUS(sc->stats.phy.state); switch (modem_state) { case 0: ret = sprintf(buf, "Modem is booting\n"); break; case 1: ret = sprintf(buf, "Modem is initializing\n"); break; case 2: ret = sprintf(buf, "Modem is operational\n"); break; case 3: ret = sprintf(buf, "Modem synchronization failed\n"); break; default: ret = sprintf(buf, "Modem state is unknown\n"); break; } out: mutex_unlock(&uea_mutex); return ret; } static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL); static ssize_t read_delin(struct device *dev, struct device_attribute *attr, char *buf) { int ret = -ENODEV; struct uea_softc *sc; char *delin = "GOOD"; mutex_lock(&uea_mutex); sc = dev_to_uea(dev); if (!sc) goto out; if (UEA_CHIP_VERSION(sc) == EAGLE_IV) { if (sc->stats.phy.flags & 0x4000) delin = "RESET"; else if (sc->stats.phy.flags & 0x0001) delin = "LOSS"; } else { if (sc->stats.phy.flags & 0x0C00) delin = "ERROR"; else if (sc->stats.phy.flags & 0x0030) delin = "LOSS"; } ret = sprintf(buf, "%s\n", delin); out: mutex_unlock(&uea_mutex); return ret; } static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL); #define UEA_ATTR(name, reset) \ \ static ssize_t read_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ int ret = -ENODEV; \ struct uea_softc *sc; \ \ mutex_lock(&uea_mutex); \ sc = dev_to_uea(dev); \ if (!sc) \ goto out; \ ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name); \ if (reset) \ sc->stats.phy.name = 0; \ out: \ mutex_unlock(&uea_mutex); \ return ret; \ } \ \ 
static DEVICE_ATTR(stat_##name, S_IRUGO, read_##name, NULL) UEA_ATTR(mflags, 1); UEA_ATTR(vidcpe, 0); UEA_ATTR(usrate, 0); UEA_ATTR(dsrate, 0); UEA_ATTR(usattenuation, 0); UEA_ATTR(dsattenuation, 0); UEA_ATTR(usmargin, 0); UEA_ATTR(dsmargin, 0); UEA_ATTR(txflow, 0); UEA_ATTR(rxflow, 0); UEA_ATTR(uscorr, 0); UEA_ATTR(dscorr, 0); UEA_ATTR(usunc, 0); UEA_ATTR(dsunc, 0); UEA_ATTR(firmid, 0); /* Retrieve the device End System Identifier (MAC) */ static int uea_getesi(struct uea_softc *sc, u_char * esi) { unsigned char mac_str[2 * ETH_ALEN + 1]; int i; if (usb_string (sc->usb_dev, sc->usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) != 2 * ETH_ALEN) return 1; for (i = 0; i < ETH_ALEN; i++) esi[i] = hex_to_bin(mac_str[2 * i]) * 16 + hex_to_bin(mac_str[2 * i + 1]); return 0; } /* ATM stuff */ static int uea_atm_open(struct usbatm_data *usbatm, struct atm_dev *atm_dev) { struct uea_softc *sc = usbatm->driver_data; return uea_getesi(sc, atm_dev->esi); } static int uea_heavy(struct usbatm_data *usbatm, struct usb_interface *intf) { struct uea_softc *sc = usbatm->driver_data; wait_event_interruptible(sc->sync_q, IS_OPERATIONAL(sc)); return 0; } static int claim_interface(struct usb_device *usb_dev, struct usbatm_data *usbatm, int ifnum) { int ret; struct usb_interface *intf = usb_ifnum_to_if(usb_dev, ifnum); if (!intf) { uea_err(usb_dev, "interface %d not found\n", ifnum); return -ENODEV; } ret = usb_driver_claim_interface(&uea_driver, intf, usbatm); if (ret != 0) uea_err(usb_dev, "can't claim interface %d, error %d\n", ifnum, ret); return ret; } static struct attribute *attrs[] = { &dev_attr_stat_status.attr, &dev_attr_stat_mflags.attr, &dev_attr_stat_human_status.attr, &dev_attr_stat_delin.attr, &dev_attr_stat_vidcpe.attr, &dev_attr_stat_usrate.attr, &dev_attr_stat_dsrate.attr, &dev_attr_stat_usattenuation.attr, &dev_attr_stat_dsattenuation.attr, &dev_attr_stat_usmargin.attr, &dev_attr_stat_dsmargin.attr, &dev_attr_stat_txflow.attr, &dev_attr_stat_rxflow.attr, 
&dev_attr_stat_uscorr.attr, &dev_attr_stat_dscorr.attr, &dev_attr_stat_usunc.attr, &dev_attr_stat_dsunc.attr, &dev_attr_stat_firmid.attr, NULL, }; static struct attribute_group attr_grp = { .attrs = attrs, }; static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb = interface_to_usbdev(intf); struct uea_softc *sc; int ret, ifnum = intf->altsetting->desc.bInterfaceNumber; unsigned int alt; uea_enters(usb); /* interface 0 is for firmware/monitoring */ if (ifnum != UEA_INTR_IFACE_NO) return -ENODEV; usbatm->flags = (sync_wait[modem_index] ? 0 : UDSL_SKIP_HEAVY_INIT); /* interface 1 is for outbound traffic */ ret = claim_interface(usb, usbatm, UEA_US_IFACE_NO); if (ret < 0) return ret; /* ADI930 has only 2 interfaces and inbound traffic is on interface 1 */ if (UEA_CHIP_VERSION(id) != ADI930) { /* interface 2 is for inbound traffic */ ret = claim_interface(usb, usbatm, UEA_DS_IFACE_NO); if (ret < 0) return ret; } sc = kzalloc(sizeof(struct uea_softc), GFP_KERNEL); if (!sc) { uea_err(usb, "uea_init: not enough memory !\n"); return -ENOMEM; } sc->usb_dev = usb; usbatm->driver_data = sc; sc->usbatm = usbatm; sc->modem_index = (modem_index < NB_MODEM) ? modem_index++ : 0; sc->driver_info = id->driver_info; /* first try to use module parameter */ if (annex[sc->modem_index] == 1) sc->annex = ANNEXA; else if (annex[sc->modem_index] == 2) sc->annex = ANNEXB; /* try to autodetect annex */ else if (sc->driver_info & AUTO_ANNEX_A) sc->annex = ANNEXA; else if (sc->driver_info & AUTO_ANNEX_B) sc->annex = ANNEXB; else sc->annex = (le16_to_cpu (sc->usb_dev->descriptor.bcdDevice) & 0x80) ? 
ANNEXB : ANNEXA; alt = altsetting[sc->modem_index]; /* ADI930 don't support iso */ if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) { if (alt <= 8 && usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) { uea_dbg(usb, "set alternate %u for 2 interface\n", alt); uea_info(usb, "using iso mode\n"); usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ; } else { uea_err(usb, "setting alternate %u failed for " "2 interface, using bulk mode\n", alt); } } ret = sysfs_create_group(&intf->dev.kobj, &attr_grp); if (ret < 0) goto error; ret = uea_boot(sc); if (ret < 0) goto error_rm_grp; return 0; error_rm_grp: sysfs_remove_group(&intf->dev.kobj, &attr_grp); error: kfree(sc); return ret; } static void uea_unbind(struct usbatm_data *usbatm, struct usb_interface *intf) { struct uea_softc *sc = usbatm->driver_data; sysfs_remove_group(&intf->dev.kobj, &attr_grp); uea_stop(sc); kfree(sc); } static struct usbatm_driver uea_usbatm_driver = { .driver_name = "ueagle-atm", .bind = uea_bind, .atm_start = uea_atm_open, .unbind = uea_unbind, .heavy_init = uea_heavy, .bulk_in = UEA_BULK_DATA_PIPE, .bulk_out = UEA_BULK_DATA_PIPE, .isoc_in = UEA_ISO_DATA_PIPE, }; static int uea_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb = interface_to_usbdev(intf); int ret; uea_enters(usb); uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) Rev (%#X): %s\n", le16_to_cpu(usb->descriptor.idVendor), le16_to_cpu(usb->descriptor.idProduct), le16_to_cpu(usb->descriptor.bcdDevice), chip_name[UEA_CHIP_VERSION(id)]); usb_reset_device(usb); if (UEA_IS_PREFIRM(id)) return uea_load_firmware(usb, UEA_CHIP_VERSION(id)); ret = usbatm_usb_probe(intf, id, &uea_usbatm_driver); if (ret == 0) { struct usbatm_data *usbatm = usb_get_intfdata(intf); struct uea_softc *sc = usbatm->driver_data; /* Ensure carrier is initialized to off as early as possible */ UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST); /* Only start the worker thread when all init is done */ wake_up_process(sc->kthread); } return 
ret; } static void uea_disconnect(struct usb_interface *intf) { struct usb_device *usb = interface_to_usbdev(intf); int ifnum = intf->altsetting->desc.bInterfaceNumber; uea_enters(usb); /* ADI930 has 2 interfaces and eagle 3 interfaces. * Pre-firmware device has one interface */ if (usb->config->desc.bNumInterfaces != 1 && ifnum == 0) { mutex_lock(&uea_mutex); usbatm_usb_disconnect(intf); mutex_unlock(&uea_mutex); uea_info(usb, "ADSL device removed\n"); } uea_leaves(usb); } /* * List of supported VID/PID */ static const struct usb_device_id uea_ids[] = { {USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM), .driver_info = EAGLE_III | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM), .driver_info = EAGLE_III | PSTFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM), .driver_info = EAGLE_IV | PREFIRM}, {USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM), .driver_info = EAGLE_IV | PSTFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM), 
.driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM), .driver_info = EAGLE_II | PREFIRM}, {USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM), .driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM), .driver_info = ADI930 | PSTFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM), .driver_info = ADI930 | PREFIRM}, {USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM), .driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B}, {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A}, {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM), .driver_info = EAGLE_I | PREFIRM}, {USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM), .driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B}, {} }; /* * USB driver descriptor */ static struct usb_driver uea_driver = { .name = "ueagle-atm", .id_table = uea_ids, .probe = uea_probe, .disconnect = uea_disconnect, }; MODULE_DEVICE_TABLE(usb, uea_ids); /** * uea_init - Initialize the module. 
* Register to USB subsystem */ static int __init uea_init(void) { printk(KERN_INFO "[ueagle-atm] driver " EAGLEUSBVERSION " loaded\n"); usb_register(&uea_driver); return 0; } module_init(uea_init); /** * uea_exit - Destroy module * Deregister with USB subsystem */ static void __exit uea_exit(void) { /* * This calls automatically the uea_disconnect method if necessary: */ usb_deregister(&uea_driver); printk(KERN_INFO "[ueagle-atm] driver unloaded\n"); } module_exit(uea_exit); MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka"); MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver"); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
scs/uclinux
lib/zlib/inflate.c
2523
48977
/* inflate.c -- zlib decompression * Copyright (C) 1995-2005 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* * Change history: * * 1.2.beta0 24 Nov 2002 * - First version -- complete rewrite of inflate to simplify code, avoid * creation of window when not needed, minimize use of window when it is * needed, make inffast.c even faster, implement gzip decoding, and to * improve code readability and style over the previous zlib inflate code * * 1.2.beta1 25 Nov 2002 * - Use pointers for available input and output checking in inffast.c * - Remove input and output counters in inffast.c * - Change inffast.c entry and loop from avail_in >= 7 to >= 6 * - Remove unnecessary second byte pull from length extra in inffast.c * - Unroll direct copy to three copies per loop in inffast.c * * 1.2.beta2 4 Dec 2002 * - Change external routine names to reduce potential conflicts * - Correct filename to inffixed.h for fixed tables in inflate.c * - Make hbuf[] unsigned char to match parameter type in inflate.c * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset) * to avoid negation problem on Alphas (64 bit) in inflate.c * * 1.2.beta3 22 Dec 2002 * - Add comments on state->bits assertion in inffast.c * - Add comments on op field in inftrees.h * - Fix bug in reuse of allocated window after inflateReset() * - Remove bit fields--back to byte structure for speed * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths * - Change post-increments to pre-increments in inflate_fast(), PPC biased? * - Add compile time option, POSTINC, to use post-increments instead (Intel?) 
* - Make MATCH copy in inflate() much faster for when inflate_fast() not used * - Use local copies of stream next and avail values, as well as local bit * buffer and bit count in inflate()--for speed when inflate_fast() not used * * 1.2.beta4 1 Jan 2003 * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings * - Move a comment on output buffer sizes from inffast.c to inflate.c * - Add comments in inffast.c to introduce the inflate_fast() routine * - Rearrange window copies in inflate_fast() for speed and simplification * - Unroll last copy for window match in inflate_fast() * - Use local copies of window variables in inflate_fast() for speed * - Pull out common write == 0 case for speed in inflate_fast() * - Make op and len in inflate_fast() unsigned for consistency * - Add FAR to lcode and dcode declarations in inflate_fast() * - Simplified bad distance check in inflate_fast() * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new * source file infback.c to provide a call-back interface to inflate for * programs like gzip and unzip -- uses window as output buffer to avoid * window copying * * 1.2.beta5 1 Jan 2003 * - Improved inflateBack() interface to allow the caller to provide initial * input in strm. 
 * - Fixed stored blocks bug in inflateBack()
 *
 * 1.2.beta6    4 Jan 2003
 * - Added comments in inffast.c on effectiveness of POSTINC
 * - Typecasting all around to reduce compiler warnings
 * - Changed loops from while (1) or do {} while (1) to for (;;), again to
 *   make compilers happy
 * - Changed type of window in inflateBackInit() to unsigned char *
 *
 * 1.2.beta7    27 Jan 2003
 * - Changed many types to unsigned or unsigned short to avoid warnings
 * - Added inflateCopy() function
 *
 * 1.2.0        9 Mar 2003
 * - Changed inflateBack() interface to provide separate opaque descriptors
 *   for the in() and out() functions
 * - Changed inflateBack() argument and in_func typedef to swap the length
 *   and buffer address return values for the input function
 * - Check next_in and next_out for Z_NULL on entry to inflate()
 *
 * The history for versions after 1.2.0 are in ChangeLog in zlib distribution.
 */

#include "zutil.h"
#include "inftrees.h"
#include "inflate.h"
#include "inffast.h"

#ifdef MAKEFIXED
#  ifndef BUILDFIXED
#    define BUILDFIXED
#  endif
#endif

/* function prototypes */
local void fixedtables OF((struct inflate_state FAR *state));
local int updatewindow OF((z_streamp strm, unsigned out));
#ifdef BUILDFIXED
   void makefixed OF((void));
#endif
local unsigned syncsearch OF((unsigned FAR *have, unsigned char FAR *buf,
                              unsigned len));

/* Reset the stream so it can decode a new deflate stream.  The allocated
   inflate state (and any window buffer) is kept; only the bookkeeping is
   cleared.  Returns Z_OK, or Z_STREAM_ERROR if strm is not initialized. */
int ZEXPORT inflateReset(strm)
z_streamp strm;
{
    struct inflate_state FAR *state;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    strm->total_in = strm->total_out = state->total = 0;
    strm->msg = Z_NULL;
    strm->adler = 1;        /* to support ill-conceived Java test suite */
    state->mode = HEAD;
    state->last = 0;
    state->havedict = 0;
    state->dmax = 32768U;
    state->head = Z_NULL;
    /* wsize == 0 marks the sliding window as "not yet in use" */
    state->wsize = 0;
    state->whave = 0;
    state->write = 0;
    state->hold = 0;
    state->bits = 0;
    state->lencode = state->distcode = state->next = state->codes;
    Tracev((stderr, "inflate: reset\n"));
    return Z_OK;
}

/* Insert `bits` low-order bits of `value` into the bit accumulator, ahead
   of any buffered input.  Used by inflateSync-style callers to prime the
   decoder.  Fails if the accumulator would exceed 32 bits. */
int ZEXPORT inflatePrime(strm, bits, value)
z_streamp strm;
int bits;
int value;
{
    struct inflate_state FAR *state;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR;
    value &= (1L << bits) - 1;
    state->hold += value << state->bits;
    state->bits += bits;
    return Z_OK;
}

/* Allocate and initialize the inflate state.  windowBits selects the window
   size (8..15); a negative value requests raw deflate (no zlib/gzip wrap),
   and bits 4..5 select zlib/gzip/auto header detection (see zlib.h).
   The window itself is allocated lazily by updatewindow(). */
int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size)
z_streamp strm;
int windowBits;
const char *version;
int stream_size;
{
    struct inflate_state FAR *state;

    /* guard against a mismatched zlib.h at the caller's compile time */
    if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
        stream_size != (int)(sizeof(z_stream)))
        return Z_VERSION_ERROR;
    if (strm == Z_NULL) return Z_STREAM_ERROR;
    strm->msg = Z_NULL;                 /* in case we return an error */
    if (strm->zalloc == (alloc_func)0) {
        strm->zalloc = zcalloc;
        strm->opaque = (voidpf)0;
    }
    if (strm->zfree == (free_func)0) strm->zfree = zcfree;
    state = (struct inflate_state FAR *)
            ZALLOC(strm, 1, sizeof(struct inflate_state));
    if (state == Z_NULL) return Z_MEM_ERROR;
    Tracev((stderr, "inflate: allocated\n"));
    strm->state = (struct internal_state FAR *)state;
    if (windowBits < 0) {
        state->wrap = 0;                /* raw deflate: no header/trailer */
        windowBits = -windowBits;
    }
    else {
        state->wrap = (windowBits >> 4) + 1;
#ifdef GUNZIP
        if (windowBits < 48) windowBits &= 15;
#endif
    }
    if (windowBits < 8 || windowBits > 15) {
        ZFREE(strm, state);
        strm->state = Z_NULL;
        return Z_STREAM_ERROR;
    }
    state->wbits = (unsigned)windowBits;
    state->window = Z_NULL;
    return inflateReset(strm);
}

/* Convenience initializer using the default window size (DEF_WBITS). */
int ZEXPORT inflateInit_(strm, version, stream_size)
z_streamp strm;
const char *version;
int stream_size;
{
    return inflateInit2_(strm, DEF_WBITS, version, stream_size);
}

/*
   Return state with length and distance decoding tables and index sizes set
   to fixed code decoding.  Normally this returns fixed tables from inffixed.h.
   If BUILDFIXED is defined, then instead this routine builds the tables the
   first time it's called, and returns those tables the first time and
   thereafter.
This reduces the size of the code by about 2K bytes, in exchange for a little execution time. However, BUILDFIXED should not be used for threaded applications, since the rewriting of the tables and virgin may not be thread-safe. */ local void fixedtables(state) struct inflate_state FAR *state; { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } #ifdef MAKEFIXED #include <stdio.h> /* Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also defines BUILDFIXED, so the tables are built on the fly. makefixed() writes those tables to stdout, which would be piped to inffixed.h. 
A small program can simply call makefixed to do this: void makefixed(void); int main(void) { makefixed(); return 0; } Then that can be linked with zlib built with MAKEFIXED defined and run: a.out > inffixed.h */ void makefixed() { unsigned low, size; struct inflate_state state; fixedtables(&state); puts(" /* inffixed.h -- table for decoding fixed codes"); puts(" * Generated automatically by makefixed()."); puts(" */"); puts(""); puts(" /* WARNING: this file should *not* be used by applications."); puts(" It is part of the implementation of this library and is"); puts(" subject to change. Applications should only use zlib.h."); puts(" */"); puts(""); size = 1U << 9; printf(" static const code lenfix[%u] = {", size); low = 0; for (;;) { if ((low % 7) == 0) printf("\n "); printf("{%u,%u,%d}", state.lencode[low].op, state.lencode[low].bits, state.lencode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); size = 1U << 5; printf("\n static const code distfix[%u] = {", size); low = 0; for (;;) { if ((low % 6) == 0) printf("\n "); printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits, state.distcode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); } #endif /* MAKEFIXED */ /* Update the window with the last wsize (normally 32K) bytes written before returning. If window does not exist yet, create it. This is only called when a window is already in use, or when output has been written during this inflate call, but the end of the deflate stream has not been reached yet. It is also called to create a window for dictionary data when a dictionary is loaded. Providing output buffers larger than 32K to inflate() should provide a speed advantage, since only the last 32K of output is copied to the sliding window upon return from inflate(), and since all distances after the first 32K of output will fall in the output data, making match copies simpler and faster. 
The advantage may be dependent on the size of the processor's data caches. */ local int updatewindow(strm, out) z_streamp strm; unsigned out; { struct inflate_state FAR *state; unsigned copy, dist; state = (struct inflate_state FAR *)strm->state; /* if it hasn't been done already, allocate space for the window */ if (state->window == Z_NULL) { state->window = (unsigned char FAR *) ZALLOC(strm, 1U << state->wbits, sizeof(unsigned char)); if (state->window == Z_NULL) return 1; } /* if window not in use yet, initialize */ if (state->wsize == 0) { state->wsize = 1U << state->wbits; state->write = 0; state->whave = 0; } /* copy state->wsize or less output bytes into the circular window */ copy = out - strm->avail_out; if (copy >= state->wsize) { zmemcpy(state->window, strm->next_out - state->wsize, state->wsize); state->write = 0; state->whave = state->wsize; } else { dist = state->wsize - state->write; if (dist > copy) dist = copy; zmemcpy(state->window + state->write, strm->next_out - copy, dist); copy -= dist; if (copy) { zmemcpy(state->window, strm->next_out - copy, copy); state->write = copy; state->whave = state->wsize; } else { state->write += dist; if (state->write == state->wsize) state->write = 0; if (state->whave < state->wsize) state->whave += dist; } } return 0; } /* Macros for inflate(): */ /* check function to use adler32() for zlib or crc32() for gzip */ #ifdef GUNZIP # define UPDATE(check, buf, len) \ (state->flags ? 
crc32(check, buf, len) : adler32(check, buf, len)) #else # define UPDATE(check, buf, len) adler32(check, buf, len) #endif /* check macros for header crc */ #ifdef GUNZIP # define CRC2(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ check = crc32(check, hbuf, 2); \ } while (0) # define CRC4(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ hbuf[2] = (unsigned char)((word) >> 16); \ hbuf[3] = (unsigned char)((word) >> 24); \ check = crc32(check, hbuf, 4); \ } while (0) #endif /* Load registers with state in inflate() for speed */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Restore state from registers in inflate() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflate() if there is no input available. */ #define PULLBYTE() \ do { \ if (have == 0) goto inf_leave; \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflate(). 
*/ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* Reverse the bytes in a 32-bit value */ #define REVERSE(q) \ ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) /* inflate() uses a state machine to process as much input data and generate as much output data as possible before returning. The state machine is structured roughly as follows: for (;;) switch (state) { ... case STATEn: if (not enough input data or output space to make progress) return; ... make progress ... state = STATEm; break; ... } so when inflate() is called again, the same case is attempted again, and if the appropriate resources are provided, the machine proceeds to the next state. The NEEDBITS() macro is usually the way the state evaluates whether it can proceed or should return. NEEDBITS() does the return if the requested bits are not available. The typical use of the BITS macros is: NEEDBITS(n); ... do something with BITS(n) ... DROPBITS(n); where NEEDBITS(n) either returns from inflate() if there isn't enough input left to load n bits into the accumulator, or it continues. BITS(n) gives the low n bits in the accumulator. When done, DROPBITS(n) drops the low n bits off the accumulator. INITBITS() clears the accumulator and sets the number of available bits to zero. BYTEBITS() discards just enough bits to put the accumulator on a byte boundary. After BYTEBITS() and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. 
NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return if there is no input available. The decoding of variable length codes uses PULLBYTE() directly in order to pull just enough bytes to decode the next code, and no more. Some states loop until they get enough input, making sure that enough state information is maintained to continue the loop where it left off if NEEDBITS() returns in the loop. For example, want, need, and keep would all have to actually be part of the saved state in case NEEDBITS() returns: case STATEw: while (want < need) { NEEDBITS(n); keep[want++] = BITS(n); DROPBITS(n); } state = STATEx; case STATEx: As shown above, if the next state is also the next case, then the break is omitted. A state may also return if there is not enough output space available to complete that state. Those states are copying stored data, writing a literal byte, and copying a matching string. When returning, a "goto inf_leave" is used to update the total counters, update the check value, and determine whether any progress has been made during that inflate() call in order to return the proper return code. Progress is defined as a change in either strm->avail_in or strm->avail_out. When there is a window, goto inf_leave will update the window with the last output written. If a goto inf_leave occurs in the middle of decompression and there is no window currently, goto inf_leave will create one and copy output to the window for the next call of inflate(). In this implementation, the flush parameter of inflate() only affects the return code (per zlib.h). inflate() always writes as much as possible to strm->next_out, given the space available and the provided input--the effect documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers the allocation of and copying into a sliding window until necessary, which provides the effect documented in zlib.h for Z_FINISH when the entire input stream available. 
So the only thing the flush parameter actually does is: when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it will return Z_BUF_ERROR if it has not reached the end of the stream. */ int ZEXPORT inflate(strm, flush) z_streamp strm; int flush; { struct inflate_state FAR *state; unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned in, out; /* save starting available input and output */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ code this; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ #ifdef GUNZIP unsigned char hbuf[4]; /* buffer for gzip header crc calculation */ #endif static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; if (strm == Z_NULL || strm->state == Z_NULL || strm->next_out == Z_NULL || (strm->next_in == Z_NULL && strm->avail_in != 0)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */ LOAD(); in = have; out = left; ret = Z_OK; for (;;) switch (state->mode) { case HEAD: if (state->wrap == 0) { state->mode = TYPEDO; break; } NEEDBITS(16); #ifdef GUNZIP if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */ state->check = crc32(0L, Z_NULL, 0); CRC2(state->check, hold); INITBITS(); state->mode = FLAGS; break; } state->flags = 0; /* expect zlib header */ if (state->head != Z_NULL) state->head->done = -1; if (!(state->wrap & 1) || /* check if zlib header allowed */ #else if ( #endif ((BITS(8) << 8) + (hold >> 8)) % 31) { strm->msg = (char *)"incorrect header check"; state->mode = BAD; break; } if (BITS(4) != 
Z_DEFLATED) { strm->msg = (char *)"unknown compression method"; state->mode = BAD; break; } DROPBITS(4); len = BITS(4) + 8; if (len > state->wbits) { strm->msg = (char *)"invalid window size"; state->mode = BAD; break; } state->dmax = 1U << len; Tracev((stderr, "inflate: zlib header ok\n")); strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = hold & 0x200 ? DICTID : TYPE; INITBITS(); break; #ifdef GUNZIP case FLAGS: NEEDBITS(16); state->flags = (int)(hold); if ((state->flags & 0xff) != Z_DEFLATED) { strm->msg = (char *)"unknown compression method"; state->mode = BAD; break; } if (state->flags & 0xe000) { strm->msg = (char *)"unknown header flags set"; state->mode = BAD; break; } if (state->head != Z_NULL) state->head->text = (int)((hold >> 8) & 1); if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); state->mode = TIME; case TIME: NEEDBITS(32); if (state->head != Z_NULL) state->head->time = hold; if (state->flags & 0x0200) CRC4(state->check, hold); INITBITS(); state->mode = OS; case OS: NEEDBITS(16); if (state->head != Z_NULL) { state->head->xflags = (int)(hold & 0xff); state->head->os = (int)(hold >> 8); } if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); state->mode = EXLEN; case EXLEN: if (state->flags & 0x0400) { NEEDBITS(16); state->length = (unsigned)(hold); if (state->head != Z_NULL) state->head->extra_len = (unsigned)hold; if (state->flags & 0x0200) CRC2(state->check, hold); INITBITS(); } else if (state->head != Z_NULL) state->head->extra = Z_NULL; state->mode = EXTRA; case EXTRA: if (state->flags & 0x0400) { copy = state->length; if (copy > have) copy = have; if (copy) { if (state->head != Z_NULL && state->head->extra != Z_NULL) { len = state->head->extra_len - state->length; zmemcpy(state->head->extra + len, next, len + copy > state->head->extra_max ? 
state->head->extra_max - len : copy); } if (state->flags & 0x0200) state->check = crc32(state->check, next, copy); have -= copy; next += copy; state->length -= copy; } if (state->length) goto inf_leave; } state->length = 0; state->mode = NAME; case NAME: if (state->flags & 0x0800) { if (have == 0) goto inf_leave; copy = 0; do { len = (unsigned)(next[copy++]); if (state->head != Z_NULL && state->head->name != Z_NULL && state->length < state->head->name_max) state->head->name[state->length++] = len; } while (len && copy < have); if (state->flags & 0x0200) state->check = crc32(state->check, next, copy); have -= copy; next += copy; if (len) goto inf_leave; } else if (state->head != Z_NULL) state->head->name = Z_NULL; state->length = 0; state->mode = COMMENT; case COMMENT: if (state->flags & 0x1000) { if (have == 0) goto inf_leave; copy = 0; do { len = (unsigned)(next[copy++]); if (state->head != Z_NULL && state->head->comment != Z_NULL && state->length < state->head->comm_max) state->head->comment[state->length++] = len; } while (len && copy < have); if (state->flags & 0x0200) state->check = crc32(state->check, next, copy); have -= copy; next += copy; if (len) goto inf_leave; } else if (state->head != Z_NULL) state->head->comment = Z_NULL; state->mode = HCRC; case HCRC: if (state->flags & 0x0200) { NEEDBITS(16); if (hold != (state->check & 0xffff)) { strm->msg = (char *)"header crc mismatch"; state->mode = BAD; break; } INITBITS(); } if (state->head != Z_NULL) { state->head->hcrc = (int)((state->flags >> 9) & 1); state->head->done = 1; } strm->adler = state->check = crc32(0L, Z_NULL, 0); state->mode = TYPE; break; #endif case DICTID: NEEDBITS(32); strm->adler = state->check = REVERSE(hold); INITBITS(); state->mode = DICT; case DICT: if (state->havedict == 0) { RESTORE(); return Z_NEED_DICT; } strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = TYPE; case TYPE: if (flush == Z_BLOCK) goto inf_leave; case TYPEDO: if (state->last) { BYTEBITS(); state->mode = 
CHECK; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch (BITS(2)) { case 0: /* stored block */ Tracev((stderr, "inflate: stored block%s\n", state->last ? " (last)" : "")); state->mode = STORED; break; case 1: /* fixed block */ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); state->mode = LEN; /* decode codes */ break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", state->last ? " (last)" : "")); state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); state->mode = COPY; case COPY: copy = state->length; if (copy) { if (copy > have) copy = have; if (copy > left) copy = left; if (copy == 0) goto inf_leave; zmemcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; break; } Tracev((stderr, "inflate: stored end\n")); state->mode = TYPE; break; case TABLE: NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif Tracev((stderr, "inflate: table sizes ok\n")); state->have = 0; state->mode = LENLENS; case LENLENS: while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 7; ret = 
inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } Tracev((stderr, "inflate: code lengths ok\n")); state->have = 0; state->mode = CODELENS; case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { this = state->lencode[BITS(state->lenbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if (this.val < 16) { NEEDBITS(this.bits); DROPBITS(this.bits); state->lens[state->have++] = this.val; } else { if (this.val == 16) { NEEDBITS(this.bits + 2); DROPBITS(this.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = state->lens[state->have - 1]; copy = 3 + BITS(2); DROPBITS(2); } else if (this.val == 17) { NEEDBITS(this.bits + 3); DROPBITS(this.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(this.bits + 7); DROPBITS(this.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* build code tables */ state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = (code const FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN; case LEN: if (have >= 6 && left >= 258) { RESTORE(); inflate_fast(strm, out); 
LOAD(); break; } for (;;) { this = state->lencode[BITS(state->lenbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if (this.op && (this.op & 0xf0) == 0) { last = this; for (;;) { this = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + this.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(this.bits); state->length = (unsigned)this.val; if ((int)(this.op) == 0) { Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", this.val)); state->mode = LIT; break; } if (this.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } if (this.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } state->extra = (unsigned)(this.op) & 15; state->mode = LENEXT; case LENEXT: if (state->extra) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); } Tracevv((stderr, "inflate: length %u\n", state->length)); state->mode = DIST; case DIST: for (;;) { this = state->distcode[BITS(state->distbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if ((this.op & 0xf0) == 0) { last = this; for (;;) { this = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + this.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(this.bits); if (this.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)this.val; state->extra = (unsigned)(this.op) & 15; state->mode = DISTEXT; case DISTEXT: if (state->extra) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); } #ifdef INFLATE_STRICT if (state->offset > state->dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif if (state->offset > state->whave + out - left) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; 
break; } Tracevv((stderr, "inflate: distance %u\n", state->offset)); state->mode = MATCH; case MATCH: if (left == 0) goto inf_leave; copy = out - left; if (state->offset > copy) { /* copy from window */ copy = state->offset - copy; if (copy > state->write) { copy -= state->write; from = state->window + (state->wsize - copy); } else from = state->window + (state->write - copy); if (copy > state->length) copy = state->length; } else { /* copy from output */ from = put - state->offset; copy = state->length; } if (copy > left) copy = left; left -= copy; state->length -= copy; do { *put++ = *from++; } while (--copy); if (state->length == 0) state->mode = LEN; break; case LIT: if (left == 0) goto inf_leave; *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; case CHECK: if (state->wrap) { NEEDBITS(32); out -= left; strm->total_out += out; state->total += out; if (out) strm->adler = state->check = UPDATE(state->check, put - out, out); out = left; if (( #ifdef GUNZIP state->flags ? hold : #endif REVERSE(hold)) != state->check) { strm->msg = (char *)"incorrect data check"; state->mode = BAD; break; } INITBITS(); Tracev((stderr, "inflate: check matches trailer\n")); } #ifdef GUNZIP state->mode = LENGTH; case LENGTH: if (state->wrap && state->flags) { NEEDBITS(32); if (hold != (state->total & 0xffffffffUL)) { strm->msg = (char *)"incorrect length check"; state->mode = BAD; break; } INITBITS(); Tracev((stderr, "inflate: length matches trailer\n")); } #endif state->mode = DONE; case DONE: ret = Z_STREAM_END; goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; case MEM: return Z_MEM_ERROR; case SYNC: default: return Z_STREAM_ERROR; } /* Return from inflate(), updating the total counts and the check value. If there was no progress during the inflate() call, return a buffer error. Call updatewindow() to create and/or update the window state. Note: a memory error from inflate() is non-recoverable. 
*/ inf_leave: RESTORE(); if (state->wsize || (state->mode < CHECK && out != strm->avail_out)) if (updatewindow(strm, out)) { state->mode = MEM; return Z_MEM_ERROR; } in -= strm->avail_in; out -= strm->avail_out; strm->total_in += in; strm->total_out += out; state->total += out; if (state->wrap && out) strm->adler = state->check = UPDATE(state->check, strm->next_out - out, out); strm->data_type = state->bits + (state->last ? 64 : 0) + (state->mode == TYPE ? 128 : 0); if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) ret = Z_BUF_ERROR; return ret; } int ZEXPORT inflateEnd(strm) z_streamp strm; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->window != Z_NULL) ZFREE(strm, state->window); ZFREE(strm, strm->state); strm->state = Z_NULL; Tracev((stderr, "inflate: end\n")); return Z_OK; } int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) z_streamp strm; const Bytef *dictionary; uInt dictLength; { struct inflate_state FAR *state; unsigned long id; /* check state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->wrap != 0 && state->mode != DICT) return Z_STREAM_ERROR; /* check for correct dictionary id */ if (state->mode == DICT) { id = adler32(0L, Z_NULL, 0); id = adler32(id, dictionary, dictLength); if (id != state->check) return Z_DATA_ERROR; } /* copy dictionary to window */ if (updatewindow(strm, strm->avail_out)) { state->mode = MEM; return Z_MEM_ERROR; } if (dictLength > state->wsize) { zmemcpy(state->window, dictionary + dictLength - state->wsize, state->wsize); state->whave = state->wsize; } else { zmemcpy(state->window + state->wsize - dictLength, dictionary, dictLength); state->whave = dictLength; } state->havedict = 1; Tracev((stderr, "inflate: dictionary set\n")); return Z_OK; } int ZEXPORT 
inflateGetHeader(strm, head) z_streamp strm; gz_headerp head; { struct inflate_state FAR *state; /* check state */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if ((state->wrap & 2) == 0) return Z_STREAM_ERROR; /* save header structure */ state->head = head; head->done = 0; return Z_OK; } /* Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found or when out of input. When called, *have is the number of pattern bytes found in order so far, in 0..3. On return *have is updated to the new state. If on return *have equals four, then the pattern was found and the return value is how many bytes were read including the last byte of the pattern. If *have is less than four, then the pattern has not been found yet and the return value is len. In the latter case, syncsearch() can be called again with more data and the *have state. *have is initialized to zero for the first call. */ local unsigned syncsearch(have, buf, len) unsigned FAR *have; unsigned char FAR *buf; unsigned len; { unsigned got; unsigned next; got = *have; next = 0; while (next < len && got < 4) { if ((int)(buf[next]) == (got < 2 ? 
0 : 0xff)) got++; else if (buf[next]) got = 0; else got = 4 - got; next++; } *have = got; return next; } int ZEXPORT inflateSync(strm) z_streamp strm; { unsigned len; /* number of bytes to look at or looked at */ unsigned long in, out; /* temporary to save total_in and total_out */ unsigned char buf[4]; /* to restore bit buffer to byte string */ struct inflate_state FAR *state; /* check parameters */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; /* if first time, start search in bit buffer */ if (state->mode != SYNC) { state->mode = SYNC; state->hold <<= state->bits & 7; state->bits -= state->bits & 7; len = 0; while (state->bits >= 8) { buf[len++] = (unsigned char)(state->hold); state->hold >>= 8; state->bits -= 8; } state->have = 0; syncsearch(&(state->have), buf, len); } /* search available input */ len = syncsearch(&(state->have), strm->next_in, strm->avail_in); strm->avail_in -= len; strm->next_in += len; strm->total_in += len; /* return no joy or set up to restart inflate() on a new block */ if (state->have != 4) return Z_DATA_ERROR; in = strm->total_in; out = strm->total_out; inflateReset(strm); strm->total_in = in; strm->total_out = out; state->mode = TYPE; return Z_OK; } /* Returns true if inflate is currently at the end of a block generated by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored block. When decompressing, PPP checks that at the end of input packet, inflate is waiting for these length bytes. 
 */
/*
 * Report whether the decompressor is stopped exactly at the start of a
 * stored (uncompressed) block with an empty bit buffer.  Used by PPP-style
 * callers to verify that a Z_SYNC_FLUSH / Z_FULL_FLUSH boundary was reached
 * (see the comment above).  Returns the boolean result, or Z_STREAM_ERROR
 * for an invalid stream pointer.
 */
int ZEXPORT inflateSyncPoint(strm)
z_streamp strm;
{
    struct inflate_state FAR *state;

    if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    /* STORED with bits == 0 means we are byte-aligned at a stored-block
       header, which is what an empty Z_SYNC_FLUSH block leaves behind */
    return state->mode == STORED && state->bits == 0;
}

/*
 * Duplicate source's complete decompression state into dest, so the two
 * streams can thereafter be advanced independently.  Allocates a fresh
 * inflate_state and, if the source has a sliding window, a fresh window
 * buffer of the same size.  Returns Z_OK, Z_MEM_ERROR on allocation
 * failure, or Z_STREAM_ERROR for invalid arguments.
 */
int ZEXPORT inflateCopy(dest, source)
z_streamp dest;
z_streamp source;
{
    struct inflate_state FAR *state;
    struct inflate_state FAR *copy;
    unsigned char FAR *window;
    unsigned wsize;

    /* check input */
    if (dest == Z_NULL || source == Z_NULL || source->state == Z_NULL ||
        source->zalloc == (alloc_func)0 || source->zfree == (free_func)0)
        return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)source->state;

    /* allocate space -- window is only needed if the source has one */
    copy = (struct inflate_state FAR *)
           ZALLOC(source, 1, sizeof(struct inflate_state));
    if (copy == Z_NULL) return Z_MEM_ERROR;
    window = Z_NULL;
    if (state->window != Z_NULL) {
        window = (unsigned char FAR *)
                 ZALLOC(source, 1U << state->wbits, sizeof(unsigned char));
        if (window == Z_NULL) {
            /* don't leak the state copy on partial failure */
            ZFREE(source, copy);
            return Z_MEM_ERROR;
        }
    }

    /* copy state */
    zmemcpy(dest, source, sizeof(z_stream));
    zmemcpy(copy, state, sizeof(struct inflate_state));
    /* lencode/distcode/next may point into the source's own codes[] table;
       after the struct copy they must be rebased onto the copy's table,
       preserving their offsets.  (lencode inside codes[] implies distcode
       is too, hence the single range check.) */
    if (state->lencode >= state->codes &&
        state->lencode <= state->codes + ENOUGH - 1) {
        copy->lencode = copy->codes + (state->lencode - state->codes);
        copy->distcode = copy->codes + (state->distcode - state->codes);
    }
    copy->next = copy->codes + (state->next - state->codes);
    if (window != Z_NULL) {
        wsize = 1U << state->wbits;
        zmemcpy(window, state->window, wsize);
    }
    copy->window = window;
    dest->state = (struct internal_state FAR *)copy;
    return Z_OK;
}
gpl-2.0
Split-Screen/android_kernel_motorola_msm8226
drivers/net/usb/gl620a.c
4059
6510
/* * GeneSys GL620USB-A based links * Copyright (C) 2001 by Jiun-Jie Huang <huangjj@genesyslogic.com.tw> * Copyright (C) 2001 by Stanislav Brabec <utx@penguin.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> #include <linux/gfp.h> /* * GeneSys GL620USB-A (www.genesyslogic.com.tw) * * ... should partially interop with the Win32 driver for this hardware. * The GeneSys docs imply there's some NDIS issue motivating this framing. * * Some info from GeneSys: * - GL620USB-A is full duplex; GL620USB is only half duplex for bulk. * (Some cables, like the BAFO-100c, use the half duplex version.) * - For the full duplex model, the low bit of the version code says * which side is which ("left/right"). * - For the half duplex type, a control/interrupt handshake settles * the transfer direction. (That's disabled here, partially coded.) * A control URB would block until other side writes an interrupt. 
* * Original code from Jiun-Jie Huang <huangjj@genesyslogic.com.tw> * and merged into "usbnet" by Stanislav Brabec <utx@penguin.cz>. */ // control msg write command #define GENELINK_CONNECT_WRITE 0xF0 // interrupt pipe index #define GENELINK_INTERRUPT_PIPE 0x03 // interrupt read buffer size #define INTERRUPT_BUFSIZE 0x08 // interrupt pipe interval value #define GENELINK_INTERRUPT_INTERVAL 0x10 // max transmit packet number per transmit #define GL_MAX_TRANSMIT_PACKETS 32 // max packet length #define GL_MAX_PACKET_LEN 1514 // max receive buffer size #define GL_RCV_BUF_SIZE \ (((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4) struct gl_packet { __le32 packet_length; char packet_data [1]; }; struct gl_header { __le32 packet_count; struct gl_packet packets; }; static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct gl_header *header; struct gl_packet *packet; struct sk_buff *gl_skb; u32 size; u32 count; header = (struct gl_header *) skb->data; // get the packet count of the received skb count = le32_to_cpu(header->packet_count); if (count > GL_MAX_TRANSMIT_PACKETS) { dbg("genelink: invalid received packet count %u", count); return 0; } // set the current packet pointer to the first packet packet = &header->packets; // decrement the length for the packet count size 4 bytes skb_pull(skb, 4); while (count > 1) { // get the packet length size = le32_to_cpu(packet->packet_length); // this may be a broken packet if (size > GL_MAX_PACKET_LEN) { dbg("genelink: invalid rx length %d", size); return 0; } // allocate the skb for the individual packet gl_skb = alloc_skb(size, GFP_ATOMIC); if (gl_skb) { // copy the packet data to the new skb memcpy(skb_put(gl_skb, size), packet->packet_data, size); usbnet_skb_return(dev, gl_skb); } // advance to the next packet packet = (struct gl_packet *)&packet->packet_data[size]; count--; // shift the data pointer to the next gl_packet skb_pull(skb, size + 4); } // skip the packet length field 4 bytes skb_pull(skb, 
4); if (skb->len > GL_MAX_PACKET_LEN) { dbg("genelink: invalid rx length %d", skb->len); return 0; } return 1; } static struct sk_buff * genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int padlen; int length = skb->len; int headroom = skb_headroom(skb); int tailroom = skb_tailroom(skb); __le32 *packet_count; __le32 *packet_len; // FIXME: magic numbers, bleech padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1; if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (padlen + (4 + 4*1)))) { if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { skb->data = memmove(skb->head + (4 + 4*1), skb->data, skb->len); skb_set_tail_pointer(skb, skb->len); } } else { struct sk_buff *skb2; skb2 = skb_copy_expand(skb, (4 + 4*1) , padlen, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return NULL; } // attach the packet count to the header packet_count = (__le32 *) skb_push(skb, (4 + 4*1)); packet_len = packet_count + 1; *packet_count = cpu_to_le32(1); *packet_len = cpu_to_le32(length); // add padding byte if ((skb->len % dev->maxpacket) == 0) skb_put(skb, 1); return skb; } static int genelink_bind(struct usbnet *dev, struct usb_interface *intf) { dev->hard_mtu = GL_RCV_BUF_SIZE; dev->net->hard_header_len += 4; dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in); dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out); return 0; } static const struct driver_info genelink_info = { .description = "Genesys GeneLink", .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_GL | FLAG_NO_SETINT, .bind = genelink_bind, .rx_fixup = genelink_rx_fixup, .tx_fixup = genelink_tx_fixup, .in = 1, .out = 2, #ifdef GENELINK_ACK .check_connect =genelink_check_connect, #endif }; static const struct usb_device_id products [] = { { USB_DEVICE(0x05e3, 0x0502), // GL620USB-A .driver_info = (unsigned long) &genelink_info, }, /* NOT: USB_DEVICE(0x05e3, 0x0501), // GL620USB * that's half duplex, not currently supported */ { }, // END }; MODULE_DEVICE_TABLE(usb, products); static struct 
usb_driver gl620a_driver = { .name = "gl620a", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, }; module_usb_driver(gl620a_driver); MODULE_AUTHOR("Jiun-Jie Huang"); MODULE_DESCRIPTION("GL620-USB-A Host-to-Host Link cables"); MODULE_LICENSE("GPL");
gpl-2.0
sytuxww/android2.3-dm8168
fs/binfmt_som.c
4315
7565
/* * linux/fs/binfmt_som.c * * These are the functions used to load SOM format executables as used * by HP-UX. * * Copyright 1999 Matthew Wilcox <willy@bofh.ai> * based on binfmt_elf which is * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com). */ #include <linux/module.h> #include <linux/fs.h> #include <linux/stat.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/binfmts.h> #include <linux/som.h> #include <linux/string.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/shm.h> #include <linux/personality.h> #include <linux/init.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <linux/elf.h> static int load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs); static int load_som_library(struct file *); /* * If we don't support core dumping, then supply a NULL so we * don't even try. */ #if 0 static int som_core_dump(struct coredump_params *cprm); #else #define som_core_dump NULL #endif #define SOM_PAGESTART(_v) ((_v) & ~(unsigned long)(SOM_PAGESIZE-1)) #define SOM_PAGEOFFSET(_v) ((_v) & (SOM_PAGESIZE-1)) #define SOM_PAGEALIGN(_v) (((_v) + SOM_PAGESIZE - 1) & ~(SOM_PAGESIZE - 1)) static struct linux_binfmt som_format = { .module = THIS_MODULE, .load_binary = load_som_binary, .load_shlib = load_som_library, .core_dump = som_core_dump, .min_coredump = SOM_PAGESIZE }; /* * create_som_tables() parses the env- and arg-strings in new user * memory and creates the pointer tables from them, and puts their * addresses on the "stack", returning the new stack pointer value. 
*/ static void create_som_tables(struct linux_binprm *bprm) { char **argv, **envp; int argc = bprm->argc; int envc = bprm->envc; unsigned long p; unsigned long *sp; /* Word-align the stack pointer */ sp = (unsigned long *)((bprm->p + 3) & ~3); envp = (char **) sp; sp += envc + 1; argv = (char **) sp; sp += argc + 1; __put_user((unsigned long) envp,++sp); __put_user((unsigned long) argv,++sp); __put_user(argc, ++sp); bprm->p = (unsigned long) sp; p = current->mm->arg_start; while (argc-- > 0) { __put_user((char *)p,argv++); p += strlen_user((char *)p); } __put_user(NULL, argv); current->mm->arg_end = current->mm->env_start = p; while (envc-- > 0) { __put_user((char *)p,envp++); p += strlen_user((char *)p); } __put_user(NULL, envp); current->mm->env_end = p; } static int check_som_header(struct som_hdr *som_ex) { int *buf = (int *)som_ex; int i, ck; if (som_ex->system_id != SOM_SID_PARISC_1_0 && som_ex->system_id != SOM_SID_PARISC_1_1 && som_ex->system_id != SOM_SID_PARISC_2_0) return -ENOEXEC; if (som_ex->a_magic != SOM_EXEC_NONSHARE && som_ex->a_magic != SOM_EXEC_SHARE && som_ex->a_magic != SOM_EXEC_DEMAND) return -ENOEXEC; if (som_ex->version_id != SOM_ID_OLD && som_ex->version_id != SOM_ID_NEW) return -ENOEXEC; ck = 0; for (i=0; i<32; i++) ck ^= buf[i]; if (ck != 0) return -ENOEXEC; return 0; } static int map_som_binary(struct file *file, const struct som_exec_auxhdr *hpuxhdr) { unsigned long code_start, code_size, data_start, data_size; unsigned long bss_start, som_brk; int retval; int prot = PROT_READ | PROT_EXEC; int flags = MAP_FIXED|MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE; mm_segment_t old_fs = get_fs(); set_fs(get_ds()); code_start = SOM_PAGESTART(hpuxhdr->exec_tmem); code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize); current->mm->start_code = code_start; current->mm->end_code = code_start + code_size; down_write(&current->mm->mmap_sem); retval = do_mmap(file, code_start, code_size, prot, flags, SOM_PAGESTART(hpuxhdr->exec_tfile)); 
up_write(&current->mm->mmap_sem); if (retval < 0 && retval > -1024) goto out; data_start = SOM_PAGESTART(hpuxhdr->exec_dmem); data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize); current->mm->start_data = data_start; current->mm->end_data = bss_start = data_start + data_size; down_write(&current->mm->mmap_sem); retval = do_mmap(file, data_start, data_size, prot | PROT_WRITE, flags, SOM_PAGESTART(hpuxhdr->exec_dfile)); up_write(&current->mm->mmap_sem); if (retval < 0 && retval > -1024) goto out; som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize); current->mm->start_brk = current->mm->brk = som_brk; down_write(&current->mm->mmap_sem); retval = do_mmap(NULL, bss_start, som_brk - bss_start, prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0); up_write(&current->mm->mmap_sem); if (retval > 0 || retval < -1024) retval = 0; out: set_fs(old_fs); return retval; } /* * These are the functions used to load SOM executables and shared * libraries. There is no binary dependent code anywhere else. */ static int load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) { int retval; unsigned int size; unsigned long som_entry; struct som_hdr *som_ex; struct som_exec_auxhdr *hpuxhdr; /* Get the exec-header */ som_ex = (struct som_hdr *) bprm->buf; retval = check_som_header(som_ex); if (retval != 0) goto out; /* Now read in the auxiliary header information */ retval = -ENOMEM; size = som_ex->aux_header_size; if (size > SOM_PAGESIZE) goto out; hpuxhdr = kmalloc(size, GFP_KERNEL); if (!hpuxhdr) goto out; retval = kernel_read(bprm->file, som_ex->aux_header_location, (char *) hpuxhdr, size); if (retval != size) { if (retval >= 0) retval = -EIO; goto out_free; } /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free; /* OK, This is the point of no return */ current->flags &= ~PF_FORKNOEXEC; current->personality = PER_HPUX; setup_new_exec(bprm); /* Set the task size for HP-UX processes such that * the gateway page is outside 
the address space. * This can be fixed later, but for now, this is much * easier. */ current->thread.task_size = 0xc0000000; /* Set map base to allow enough room for hp-ux heap growth */ current->thread.map_base = 0x80000000; retval = map_som_binary(bprm->file, hpuxhdr); if (retval < 0) goto out_free; som_entry = hpuxhdr->exec_entry; kfree(hpuxhdr); set_binfmt(&som_format); install_exec_creds(bprm); setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); create_som_tables(bprm); current->mm->start_stack = bprm->p; #if 0 printk("(start_brk) %08lx\n" , (unsigned long) current->mm->start_brk); printk("(end_code) %08lx\n" , (unsigned long) current->mm->end_code); printk("(start_code) %08lx\n" , (unsigned long) current->mm->start_code); printk("(end_data) %08lx\n" , (unsigned long) current->mm->end_data); printk("(start_stack) %08lx\n" , (unsigned long) current->mm->start_stack); printk("(brk) %08lx\n" , (unsigned long) current->mm->brk); #endif map_hpux_gateway_page(current,current->mm); start_thread_som(regs, som_entry, bprm->p); return 0; /* error cleanup */ out_free: kfree(hpuxhdr); out: return retval; } static int load_som_library(struct file *f) { /* No lib support in SOM yet. gizza chance.. */ return -ENOEXEC; } /* Install the SOM loader. * N.B. We *rely* on the table being the right size with the * right number of free slots... */ static int __init init_som_binfmt(void) { return register_binfmt(&som_format); } static void __exit exit_som_binfmt(void) { /* Remove the SOM loader. */ unregister_binfmt(&som_format); } core_initcall(init_som_binfmt); module_exit(exit_som_binfmt); MODULE_LICENSE("GPL");
gpl-2.0
multirom-m8/kernel_htc_m8gpe
arch/powerpc/kernel/udbg.c
4571
4554
/* * polling mode stateless debugging stuff, originally for NS16550 Serial Ports * * c 2001 PPC 64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/console.h> #include <linux/init.h> #include <asm/processor.h> #include <asm/udbg.h> void (*udbg_putc)(char c); void (*udbg_flush)(void); int (*udbg_getc)(void); int (*udbg_getc_poll)(void); /* * Early debugging facilities. You can enable _one_ of these via .config, * if you do so your kernel _will not boot_ on anything else. Be careful. */ void __init udbg_early_init(void) { #if defined(CONFIG_PPC_EARLY_DEBUG_LPAR) /* For LPAR machines that have an HVC console on vterm 0 */ udbg_init_debug_lpar(); #elif defined(CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI) /* For LPAR machines that have an HVSI console on vterm 0 */ udbg_init_debug_lpar_hvsi(); #elif defined(CONFIG_PPC_EARLY_DEBUG_G5) /* For use on Apple G5 machines */ udbg_init_pmac_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL) /* RTAS panel debug */ udbg_init_rtas_panel(); #elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE) /* RTAS console debug */ udbg_init_rtas_console(); #elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) /* Maple real mode debug */ udbg_init_maple_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT) udbg_init_debug_beat(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) udbg_init_pas_realmode(); #elif defined(CONFIG_BOOTX_TEXT) udbg_init_btext(); #elif defined(CONFIG_PPC_EARLY_DEBUG_44x) /* PPC44x debug */ udbg_init_44x_as1(); #elif defined(CONFIG_PPC_EARLY_DEBUG_40x) /* PPC40x debug */ udbg_init_40x_realmode(); #elif defined(CONFIG_PPC_EARLY_DEBUG_CPM) udbg_init_cpm(); #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) udbg_init_usbgecko(); #elif 
defined(CONFIG_PPC_EARLY_DEBUG_WSP) udbg_init_wsp(); #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) udbg_init_ehv_bc(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) udbg_init_ps3gelic(); #elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_RAW) udbg_init_debug_opal_raw(); #elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI) udbg_init_debug_opal_hvsi(); #endif #ifdef CONFIG_PPC_EARLY_DEBUG console_loglevel = 10; register_early_udbg_console(); #endif } /* udbg library, used by xmon et al */ void udbg_puts(const char *s) { if (udbg_putc) { char c; if (s && *s != '\0') { while ((c = *s++) != '\0') udbg_putc(c); } if (udbg_flush) udbg_flush(); } #if 0 else { printk("%s", s); } #endif } int udbg_write(const char *s, int n) { int remain = n; char c; if (!udbg_putc) return 0; if (s && *s != '\0') { while (((c = *s++) != '\0') && (remain-- > 0)) { udbg_putc(c); } } if (udbg_flush) udbg_flush(); return n - remain; } int udbg_read(char *buf, int buflen) { char *p = buf; int i, c; if (!udbg_getc) return 0; for (i = 0; i < buflen; ++i) { do { c = udbg_getc(); if (c == -1 && i == 0) return -1; } while (c == 0x11 || c == 0x13); if (c == 0 || c == -1) break; *p++ = c; } return i; } #define UDBG_BUFSIZE 256 void udbg_printf(const char *fmt, ...) { char buf[UDBG_BUFSIZE]; va_list args; va_start(args, fmt); vsnprintf(buf, UDBG_BUFSIZE, fmt, args); udbg_puts(buf); va_end(args); } void __init udbg_progress(char *s, unsigned short hex) { udbg_puts(s); udbg_puts("\n"); } /* * Early boot console based on udbg */ static void udbg_console_write(struct console *con, const char *s, unsigned int n) { udbg_write(s, n); } static struct console udbg_console = { .name = "udbg", .write = udbg_console_write, .flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME, .index = 0, }; static int early_console_initialized; /* * Called by setup_system after ppc_md->probe and ppc_md->early_init. * Call it again after setting udbg_putc in ppc_md->setup_arch. 
*/ void __init register_early_udbg_console(void) { if (early_console_initialized) return; if (!udbg_putc) return; if (strstr(boot_command_line, "udbg-immortal")) { printk(KERN_INFO "early console immortal !\n"); udbg_console.flags &= ~CON_BOOT; } early_console_initialized = 1; register_console(&udbg_console); } #if 0 /* if you want to use this as a regular output console */ console_initcall(register_udbg_console); #endif
gpl-2.0
oppo-source/Find7-5.1-kernel-source
arch/um/kernel/irq.c
4827
11302
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c: * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar */ #include "linux/cpumask.h" #include "linux/hardirq.h" #include "linux/interrupt.h" #include "linux/kernel_stat.h" #include "linux/module.h" #include "linux/sched.h" #include "linux/seq_file.h" #include "linux/slab.h" #include "as-layout.h" #include "kern_util.h" #include "os.h" /* * This list is accessed under irq_lock, except in sigio_handler, * where it is safe from being modified. IRQ handlers won't change it - * if an IRQ source has vanished, it will be freed by free_irqs just * before returning from sigio_handler. That will process a separate * list of irqs to free, with its own locking, coming back here to * remove list elements, taking the irq_lock to do so. */ static struct irq_fd *active_fds = NULL; static struct irq_fd **last_irq_ptr = &active_fds; extern void free_irqs(void); void sigio_handler(int sig, struct uml_pt_regs *regs) { struct irq_fd *irq_fd; int n; if (smp_sigio_handler()) return; while (1) { n = os_waiting_for_events(active_fds); if (n <= 0) { if (n == -EINTR) continue; else break; } for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) { if (irq_fd->current_events != 0) { irq_fd->current_events = 0; do_IRQ(irq_fd->irq, regs); } } } free_irqs(); } static DEFINE_SPINLOCK(irq_lock); static int activate_fd(int irq, int fd, int type, void *dev_id) { struct pollfd *tmp_pfd; struct irq_fd *new_fd, *irq_fd; unsigned long flags; int events, err, n; err = os_set_fd_async(fd); if (err < 0) goto out; err = -ENOMEM; new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL); if (new_fd == NULL) goto out; if (type == IRQ_READ) events = UM_POLLIN | UM_POLLPRI; else events = UM_POLLOUT; *new_fd = ((struct irq_fd) { .next = NULL, .id = dev_id, .fd = fd, .type = type, .irq = irq, .events = events, .current_events = 0 } ); err = -EBUSY; 
spin_lock_irqsave(&irq_lock, flags); for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) { if ((irq_fd->fd == fd) && (irq_fd->type == type)) { printk(KERN_ERR "Registering fd %d twice\n", fd); printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq); printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id); goto out_unlock; } } if (type == IRQ_WRITE) fd = -1; tmp_pfd = NULL; n = 0; while (1) { n = os_create_pollfd(fd, events, tmp_pfd, n); if (n == 0) break; /* * n > 0 * It means we couldn't put new pollfd to current pollfds * and tmp_fds is NULL or too small for new pollfds array. * Needed size is equal to n as minimum. * * Here we have to drop the lock in order to call * kmalloc, which might sleep. * If something else came in and changed the pollfds array * so we will not be able to put new pollfd struct to pollfds * then we free the buffer tmp_fds and try again. */ spin_unlock_irqrestore(&irq_lock, flags); kfree(tmp_pfd); tmp_pfd = kmalloc(n, GFP_KERNEL); if (tmp_pfd == NULL) goto out_kfree; spin_lock_irqsave(&irq_lock, flags); } *last_irq_ptr = new_fd; last_irq_ptr = &new_fd->next; spin_unlock_irqrestore(&irq_lock, flags); /* * This calls activate_fd, so it has to be outside the critical * section. 
*/ maybe_sigio_broken(fd, (type == IRQ_READ)); return 0; out_unlock: spin_unlock_irqrestore(&irq_lock, flags); out_kfree: kfree(new_fd); out: return err; } static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg) { unsigned long flags; spin_lock_irqsave(&irq_lock, flags); os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr); spin_unlock_irqrestore(&irq_lock, flags); } struct irq_and_dev { int irq; void *dev; }; static int same_irq_and_dev(struct irq_fd *irq, void *d) { struct irq_and_dev *data = d; return ((irq->irq == data->irq) && (irq->id == data->dev)); } static void free_irq_by_irq_and_dev(unsigned int irq, void *dev) { struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq, .dev = dev }); free_irq_by_cb(same_irq_and_dev, &data); } static int same_fd(struct irq_fd *irq, void *fd) { return (irq->fd == *((int *)fd)); } void free_irq_by_fd(int fd) { free_irq_by_cb(same_fd, &fd); } /* Must be called with irq_lock held */ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out) { struct irq_fd *irq; int i = 0; int fdi; for (irq = active_fds; irq != NULL; irq = irq->next) { if ((irq->fd == fd) && (irq->irq == irqnum)) break; i++; } if (irq == NULL) { printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n", fd); goto out; } fdi = os_get_pollfd(i); if ((fdi != -1) && (fdi != fd)) { printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds " "and pollfds, fd %d vs %d, need %d\n", irq->fd, fdi, fd); irq = NULL; goto out; } *index_out = i; out: return irq; } void reactivate_fd(int fd, int irqnum) { struct irq_fd *irq; unsigned long flags; int i; spin_lock_irqsave(&irq_lock, flags); irq = find_irq_by_fd(fd, irqnum, &i); if (irq == NULL) { spin_unlock_irqrestore(&irq_lock, flags); return; } os_set_pollfd(i, irq->fd); spin_unlock_irqrestore(&irq_lock, flags); add_sigio_fd(fd); } void deactivate_fd(int fd, int irqnum) { struct irq_fd *irq; unsigned long flags; int i; spin_lock_irqsave(&irq_lock, flags); irq = 
find_irq_by_fd(fd, irqnum, &i); if (irq == NULL) { spin_unlock_irqrestore(&irq_lock, flags); return; } os_set_pollfd(i, -1); spin_unlock_irqrestore(&irq_lock, flags); ignore_sigio_fd(fd); } EXPORT_SYMBOL(deactivate_fd); /* * Called just before shutdown in order to provide a clean exec * environment in case the system is rebooting. No locking because * that would cause a pointless shutdown hang if something hadn't * released the lock. */ int deactivate_all_fds(void) { struct irq_fd *irq; int err; for (irq = active_fds; irq != NULL; irq = irq->next) { err = os_clear_fd_async(irq->fd); if (err) return err; } /* If there is a signal already queued, after unblocking ignore it */ os_set_ioignore(); return 0; } /* * do_IRQ handles all normal device IRQs (the special * SMP cross-CPU interrupts have their own specific * handlers). */ unsigned int do_IRQ(int irq, struct uml_pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs); irq_enter(); generic_handle_irq(irq); irq_exit(); set_irq_regs(old_regs); return 1; } int um_request_irq(unsigned int irq, int fd, int type, irq_handler_t handler, unsigned long irqflags, const char * devname, void *dev_id) { int err; if (fd != -1) { err = activate_fd(irq, fd, type, dev_id); if (err) return err; } return request_irq(irq, handler, irqflags, devname, dev_id); } EXPORT_SYMBOL(um_request_irq); EXPORT_SYMBOL(reactivate_fd); /* * irq_chip must define at least enable/disable and ack when * the edge handler is used. */ static void dummy(struct irq_data *d) { } /* This is used for everything else than the timer. 
*/ static struct irq_chip normal_irq_type = { .name = "SIGIO", .release = free_irq_by_irq_and_dev, .irq_disable = dummy, .irq_enable = dummy, .irq_ack = dummy, }; static struct irq_chip SIGVTALRM_irq_type = { .name = "SIGVTALRM", .release = free_irq_by_irq_and_dev, .irq_disable = dummy, .irq_enable = dummy, .irq_ack = dummy, }; void __init init_IRQ(void) { int i; irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq); for (i = 1; i < NR_IRQS; i++) irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq); } /* * IRQ stack entry and exit: * * Unlike i386, UML doesn't receive IRQs on the normal kernel stack * and switch over to the IRQ stack after some preparation. We use * sigaltstack to receive signals on a separate stack from the start. * These two functions make sure the rest of the kernel won't be too * upset by being on a different stack. The IRQ stack has a * thread_info structure at the bottom so that current et al continue * to work. * * to_irq_stack copies the current task's thread_info to the IRQ stack * thread_info and sets the tasks's stack to point to the IRQ stack. * * from_irq_stack copies the thread_info struct back (flags may have * been modified) and resets the task's stack pointer. * * Tricky bits - * * What happens when two signals race each other? UML doesn't block * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal * could arrive while a previous one is still setting up the * thread_info. * * There are three cases - * The first interrupt on the stack - sets up the thread_info and * handles the interrupt * A nested interrupt interrupting the copying of the thread_info - * can't handle the interrupt, as the stack is in an unknown state * A nested interrupt not interrupting the copying of the * thread_info - doesn't do any setup, just handles the interrupt * * The first job is to figure out whether we interrupted stack setup. * This is done by xchging the signal mask with thread_info->pending. 
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

/* Signal bits handed between racing handlers; bit 0 doubles as the
 * "outer handler is tearing down" marker set by from_irq_stack(). */
static unsigned long pending_mask;

/*
 * Switch the current task onto the IRQ stack, or hand our signal off to
 * the handler already doing so.
 *
 * *mask_out carries the caller's signal bit(s) in; on a 0 return it is
 * updated with any signals that raced in meanwhile (plus the nested flag
 * in bit 0).  Returns 1 if another handler owns stack setup and will
 * process our signal for us, 0 if the caller should handle it here.
 */
unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	/* Claim pending_mask; a non-zero previous value means setup is
	 * already in progress elsewhere. */
	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		/* Outer handler: clone the task's thread_info onto the IRQ
		 * stack and point the task at it.  real_thread remembers the
		 * original so from_irq_stack() can copy it back. */
		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	/* Drain any signals that accumulated during setup and fold in the
	 * nesting flag (bit 0) for the caller. */
	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

/*
 * Leave the IRQ stack: copy the (possibly modified) thread_info back to
 * the task's real stack and restore the task's stack pointer.  Returns
 * the mask of signals that arrived during teardown (bit 0, our own
 * in-progress marker, stripped) so the caller can still handle them.
 */
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	/* Mark teardown in progress so racing handlers park their bits in
	 * pending_mask instead of running on a half-restored stack. */
	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}
gpl-2.0
chenzhiwo/linux-sunxi
arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
4827
4947
/* * arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c * * Marvell Orion-VoIP FXO Reference Design Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> #include <net/dsa.h> #include <asm/mach-types.h> #include <asm/leds.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * RD-88F5181L FXO Info ****************************************************************************/ /* * 8M NOR flash Device bus boot chip select */ #define RD88F5181L_FXO_NOR_BOOT_BASE 0xff800000 #define RD88F5181L_FXO_NOR_BOOT_SIZE SZ_8M /***************************************************************************** * 8M NOR Flash on Device bus Boot chip select ****************************************************************************/ static struct physmap_flash_data rd88f5181l_fxo_nor_boot_flash_data = { .width = 1, }; static struct resource rd88f5181l_fxo_nor_boot_flash_resource = { .flags = IORESOURCE_MEM, .start = RD88F5181L_FXO_NOR_BOOT_BASE, .end = RD88F5181L_FXO_NOR_BOOT_BASE + RD88F5181L_FXO_NOR_BOOT_SIZE - 1, }; static struct platform_device rd88f5181l_fxo_nor_boot_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &rd88f5181l_fxo_nor_boot_flash_data, }, .num_resources = 1, .resource = &rd88f5181l_fxo_nor_boot_flash_resource, }; /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int rd88f5181l_fxo_mpp_modes[] 
__initdata = { MPP0_GPIO, /* LED1 CardBus LED (front panel) */ MPP1_GPIO, /* PCI_intA */ MPP2_GPIO, /* Hard Reset / Factory Init*/ MPP3_GPIO, /* FXS or DAA select */ MPP4_GPIO, /* LED6 - phone LED (front panel) */ MPP5_GPIO, /* LED5 - phone LED (front panel) */ MPP6_PCI_CLK, /* CPU PCI refclk */ MPP7_PCI_CLK, /* PCI/PCIe refclk */ MPP8_GPIO, /* CardBus reset */ MPP9_GPIO, /* GE_RXERR */ MPP10_GPIO, /* LED2 MiniPCI LED (front panel) */ MPP11_GPIO, /* Lifeline control */ MPP12_GIGE, /* GE_TXD[4] */ MPP13_GIGE, /* GE_TXD[5] */ MPP14_GIGE, /* GE_TXD[6] */ MPP15_GIGE, /* GE_TXD[7] */ MPP16_GIGE, /* GE_RXD[4] */ MPP17_GIGE, /* GE_RXD[5] */ MPP18_GIGE, /* GE_RXD[6] */ MPP19_GIGE, /* GE_RXD[7] */ 0, }; static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = { .phy_addr = MV643XX_ETH_PHY_NONE, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; static struct dsa_chip_data rd88f5181l_fxo_switch_chip_data = { .port_names[0] = "lan2", .port_names[1] = "lan1", .port_names[2] = "wan", .port_names[3] = "cpu", .port_names[5] = "lan4", .port_names[7] = "lan3", }; static struct dsa_platform_data rd88f5181l_fxo_switch_plat_data = { .nr_chips = 1, .chip = &rd88f5181l_fxo_switch_chip_data, }; static void __init rd88f5181l_fxo_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(rd88f5181l_fxo_mpp_modes); /* * Configure peripherals. */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f5181l_fxo_eth_data); orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data, NO_IRQ); orion5x_uart0_init(); orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE, RD88F5181L_FXO_NOR_BOOT_SIZE); platform_device_register(&rd88f5181l_fxo_nor_boot_flash); } static int __init rd88f5181l_fxo_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; /* * Check for devices with hard-wired IRQs. */ irq = orion5x_pci_map_irq(dev, slot, pin); if (irq != -1) return irq; /* * Mini-PCI / Cardbus slot. 
*/ return gpio_to_irq(1); } static struct hw_pci rd88f5181l_fxo_pci __initdata = { .nr_controllers = 2, .swizzle = pci_std_swizzle, .setup = orion5x_pci_sys_setup, .scan = orion5x_pci_sys_scan_bus, .map_irq = rd88f5181l_fxo_pci_map_irq, }; static int __init rd88f5181l_fxo_pci_init(void) { if (machine_is_rd88f5181l_fxo()) { orion5x_pci_set_cardbus_mode(); pci_common_init(&rd88f5181l_fxo_pci); } return 0; } subsys_initcall(rd88f5181l_fxo_pci_init); MACHINE_START(RD88F5181L_FXO, "Marvell Orion-VoIP FXO Reference Design") /* Maintainer: Nicolas Pitre <nico@marvell.com> */ .atag_offset = 0x100, .init_machine = rd88f5181l_fxo_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END
gpl-2.0
letama/android_kernel_nozomi
arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
4827
4947
/* * arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c * * Marvell Orion-VoIP FXO Reference Design Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <linux/irq.h> #include <linux/mtd/physmap.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> #include <net/dsa.h> #include <asm/mach-types.h> #include <asm/leds.h> #include <asm/mach/arch.h> #include <asm/mach/pci.h> #include <mach/orion5x.h> #include "common.h" #include "mpp.h" /***************************************************************************** * RD-88F5181L FXO Info ****************************************************************************/ /* * 8M NOR flash Device bus boot chip select */ #define RD88F5181L_FXO_NOR_BOOT_BASE 0xff800000 #define RD88F5181L_FXO_NOR_BOOT_SIZE SZ_8M /***************************************************************************** * 8M NOR Flash on Device bus Boot chip select ****************************************************************************/ static struct physmap_flash_data rd88f5181l_fxo_nor_boot_flash_data = { .width = 1, }; static struct resource rd88f5181l_fxo_nor_boot_flash_resource = { .flags = IORESOURCE_MEM, .start = RD88F5181L_FXO_NOR_BOOT_BASE, .end = RD88F5181L_FXO_NOR_BOOT_BASE + RD88F5181L_FXO_NOR_BOOT_SIZE - 1, }; static struct platform_device rd88f5181l_fxo_nor_boot_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &rd88f5181l_fxo_nor_boot_flash_data, }, .num_resources = 1, .resource = &rd88f5181l_fxo_nor_boot_flash_resource, }; /***************************************************************************** * General Setup ****************************************************************************/ static unsigned int rd88f5181l_fxo_mpp_modes[] 
__initdata = { MPP0_GPIO, /* LED1 CardBus LED (front panel) */ MPP1_GPIO, /* PCI_intA */ MPP2_GPIO, /* Hard Reset / Factory Init*/ MPP3_GPIO, /* FXS or DAA select */ MPP4_GPIO, /* LED6 - phone LED (front panel) */ MPP5_GPIO, /* LED5 - phone LED (front panel) */ MPP6_PCI_CLK, /* CPU PCI refclk */ MPP7_PCI_CLK, /* PCI/PCIe refclk */ MPP8_GPIO, /* CardBus reset */ MPP9_GPIO, /* GE_RXERR */ MPP10_GPIO, /* LED2 MiniPCI LED (front panel) */ MPP11_GPIO, /* Lifeline control */ MPP12_GIGE, /* GE_TXD[4] */ MPP13_GIGE, /* GE_TXD[5] */ MPP14_GIGE, /* GE_TXD[6] */ MPP15_GIGE, /* GE_TXD[7] */ MPP16_GIGE, /* GE_RXD[4] */ MPP17_GIGE, /* GE_RXD[5] */ MPP18_GIGE, /* GE_RXD[6] */ MPP19_GIGE, /* GE_RXD[7] */ 0, }; static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = { .phy_addr = MV643XX_ETH_PHY_NONE, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; static struct dsa_chip_data rd88f5181l_fxo_switch_chip_data = { .port_names[0] = "lan2", .port_names[1] = "lan1", .port_names[2] = "wan", .port_names[3] = "cpu", .port_names[5] = "lan4", .port_names[7] = "lan3", }; static struct dsa_platform_data rd88f5181l_fxo_switch_plat_data = { .nr_chips = 1, .chip = &rd88f5181l_fxo_switch_chip_data, }; static void __init rd88f5181l_fxo_init(void) { /* * Setup basic Orion functions. Need to be called early. */ orion5x_init(); orion5x_mpp_conf(rd88f5181l_fxo_mpp_modes); /* * Configure peripherals. */ orion5x_ehci0_init(); orion5x_eth_init(&rd88f5181l_fxo_eth_data); orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data, NO_IRQ); orion5x_uart0_init(); orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE, RD88F5181L_FXO_NOR_BOOT_SIZE); platform_device_register(&rd88f5181l_fxo_nor_boot_flash); } static int __init rd88f5181l_fxo_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; /* * Check for devices with hard-wired IRQs. */ irq = orion5x_pci_map_irq(dev, slot, pin); if (irq != -1) return irq; /* * Mini-PCI / Cardbus slot. 
*/ return gpio_to_irq(1); } static struct hw_pci rd88f5181l_fxo_pci __initdata = { .nr_controllers = 2, .swizzle = pci_std_swizzle, .setup = orion5x_pci_sys_setup, .scan = orion5x_pci_sys_scan_bus, .map_irq = rd88f5181l_fxo_pci_map_irq, }; static int __init rd88f5181l_fxo_pci_init(void) { if (machine_is_rd88f5181l_fxo()) { orion5x_pci_set_cardbus_mode(); pci_common_init(&rd88f5181l_fxo_pci); } return 0; } subsys_initcall(rd88f5181l_fxo_pci_init); MACHINE_START(RD88F5181L_FXO, "Marvell Orion-VoIP FXO Reference Design") /* Maintainer: Nicolas Pitre <nico@marvell.com> */ .atag_offset = 0x100, .init_machine = rd88f5181l_fxo_init, .map_io = orion5x_map_io, .init_early = orion5x_init_early, .init_irq = orion5x_init_irq, .timer = &orion5x_timer, .fixup = tag_fixup_mem32, .restart = orion5x_restart, MACHINE_END
gpl-2.0
RaymanFX/kernel_samsung_lt03wifi
drivers/net/ethernet/8390/es3210.c
5083
12788
/* es3210.c Linux driver for Racal-Interlan ES3210 EISA Network Adapter Copyright (C) 1996, Paul Gortmaker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Information and Code Sources: 1) The existing myriad of Linux 8390 drivers written by Donald Becker. 2) Once again Russ Nelson's asm packet driver provided additional info. 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. Too bad it doesn't work -- see below. The ES3210 is an EISA shared memory NS8390 implementation. Note that all memory copies to/from the board must be 32bit transfers. Which rules out using eth_io_copy_and_sum() in this driver. Apparently there are two slightly different revisions of the card, since there are two distinct EISA cfg files (!rii0101.cfg and !rii0102.cfg) One has media select in the cfg file and the other doesn't. Hopefully this will work with either. That is about all I can tell you about it, having never actually even seen one of these cards. :) Try http://www.interlan.com if you want more info. Thanks go to Mark Salazar for testing v0.02 of this driver. Bugs, to-fix, etc: 1) The EISA cfg ports that are *supposed* to have the IRQ and shared mem values just read 0xff all the time. Hrrmpf. Apparently the same happens with the packet driver as the code for reading these registers is disabled there. In the meantime, boot with: ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and shared memory detection. (The i/o port detection is okay.) 2) Module support currently untested. Probably works though. 
*/ static const char version[] = "es3210.c: Driver revision v0.03, 14/09/96\n"; #include <linux/module.h> #include <linux/eisa.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <asm/io.h> #include "8390.h" static int es_probe1(struct net_device *dev, int ioaddr); static void es_reset_8390(struct net_device *dev); static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page); #define ES_START_PG 0x00 /* First page of TX buffer */ #define ES_STOP_PG 0x40 /* Last page +1 of RX ring */ #define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */ #define ES_ID_PORT 0xc80 /* Same for all EISA cards */ #define ES_SA_PROM 0xc90 /* Start of e'net addr. */ #define ES_RESET_PORT 0xc84 /* From the packet driver source */ #define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */ #define ES_ADDR0 0x02 /* 3 byte vendor prefix */ #define ES_ADDR1 0x07 #define ES_ADDR2 0x01 /* * Two card revisions. EISA ID's are always rev. minor, rev. major,, and * then the three vendor letters stored in 5 bits each, with an "a" = 1. * For eg: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA * config utility determines automagically what config file(s) to use. */ #define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */ #define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */ #define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */ #define ES_CFG2 0xcc1 #define ES_CFG3 0xcc2 #define ES_CFG4 0xcc3 #define ES_CFG5 0xcc4 #define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */ /* * You can OR any of the following bits together and assign it * to ES_DEBUG to get verbose driver info during operation. 
* Some of these don't do anything yet. */ #define ES_D_PROBE 0x01 #define ES_D_RX_PKT 0x02 #define ES_D_TX_PKT 0x04 #define ED_D_IRQ 0x08 #define ES_DEBUG 0 static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10}; static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15}; /* * Probe for the card. The best way is to read the EISA ID if it * is known. Then we check the prefix of the station address * PROM for a match against the Racal-Interlan assigned value. */ static int __init do_es_probe(struct net_device *dev) { unsigned short ioaddr = dev->base_addr; int irq = dev->irq; int mem_start = dev->mem_start; if (ioaddr > 0x1ff) /* Check a single specified location. */ return es_probe1(dev, ioaddr); else if (ioaddr > 0) /* Don't probe at all. */ return -ENXIO; if (!EISA_bus) { #if ES_DEBUG & ES_D_PROBE printk("es3210.c: Not EISA bus. Not probing high ports.\n"); #endif return -ENXIO; } /* EISA spec allows for up to 16 slots, but 8 is typical. */ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { if (es_probe1(dev, ioaddr) == 0) return 0; dev->irq = irq; dev->mem_start = mem_start; } return -ENODEV; } #ifndef MODULE struct net_device * __init es_probe(int unit) { struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) return ERR_PTR(-ENOMEM); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = do_es_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static int __init es_probe1(struct net_device *dev, int ioaddr) { int i, retval; unsigned long eisa_id; if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210")) return -ENODEV; #if ES_DEBUG & ES_D_PROBE printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT)); printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n", inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3), inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6)); #endif /* Check the EISA ID of 
the card. */ eisa_id = inl(ioaddr + ES_ID_PORT); if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) { retval = -ENODEV; goto out; } for (i = 0; i < ETH_ALEN ; i++) dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i); /* Check the Racal vendor ID as well. */ if (dev->dev_addr[0] != ES_ADDR0 || dev->dev_addr[1] != ES_ADDR1 || dev->dev_addr[2] != ES_ADDR2) { printk("es3210.c: card not found %pM (invalid_prefix).\n", dev->dev_addr); retval = -ENODEV; goto out; } printk("es3210.c: ES3210 rev. %ld at %#x, node %pM", eisa_id>>24, ioaddr, dev->dev_addr); /* Snarf the interrupt now. */ if (dev->irq == 0) { unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07; unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe; if (hi_irq != 0) { dev->irq = hi_irq_map[hi_irq - 1]; } else { int i = 0; while (lo_irq > (1<<i)) i++; dev->irq = lo_irq_map[i]; } printk(" using IRQ %d", dev->irq); #if ES_DEBUG & ES_D_PROBE printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n", hi_irq, lo_irq, dev->irq); #endif } else { if (dev->irq == 2) dev->irq = 9; /* Doh! 
*/ printk(" assigning IRQ %d", dev->irq); } if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) { printk (" unable to get IRQ %d.\n", dev->irq); retval = -EAGAIN; goto out; } if (dev->mem_start == 0) { unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0; unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07; if (mem_enabled != 0x80) { printk(" shared mem disabled - giving up\n"); retval = -ENXIO; goto out1; } dev->mem_start = 0xC0000 + mem_bits*0x4000; printk(" using "); } else { printk(" assigning "); } ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256); if (!ei_status.mem) { printk("ioremap failed - giving up\n"); retval = -ENXIO; goto out1; } dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256; printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1); #if ES_DEBUG & ES_D_PROBE if (inb(ioaddr + ES_CFG5)) printk("es3210: Warning - DMA channel enabled, but not used here.\n"); #endif /* Note, point at the 8390, and not the card... */ dev->base_addr = ioaddr + ES_NIC_OFFSET; ei_status.name = "ES3210"; ei_status.tx_start_page = ES_START_PG; ei_status.rx_start_page = ES_START_PG + TX_PAGES; ei_status.stop_page = ES_STOP_PG; ei_status.word16 = 1; if (ei_debug > 0) printk(version); ei_status.reset_8390 = &es_reset_8390; ei_status.block_input = &es_block_input; ei_status.block_output = &es_block_output; ei_status.get_8390_hdr = &es_get_8390_hdr; dev->netdev_ops = &ei_netdev_ops; NS8390_init(dev, 0); retval = register_netdev(dev); if (retval) goto out1; return 0; out1: free_irq(dev->irq, dev); out: release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT); return retval; } /* * Reset as per the packet driver method. Judging by the EISA cfg * file, this just toggles the "Board Enable" bits (bit 2 and 0). 
*/ static void es_reset_8390(struct net_device *dev) { unsigned short ioaddr = dev->base_addr; unsigned long end; outb(0x04, ioaddr + ES_RESET_PORT); if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name); end = jiffies + 2*HZ/100; while ((signed)(end - jiffies) > 0) continue; ei_status.txing = 0; outb(0x01, ioaddr + ES_RESET_PORT); if (ei_debug > 1) printk("reset done\n"); } /* * Note: In the following three functions is the implicit assumption * that the associated memcpy will only use "rep; movsl" as long as * we keep the counts as some multiple of doublewords. This is a * requirement of the hardware, and also prevents us from using * eth_io_copy_and_sum() since we can't guarantee it will limit * itself to doubleword access. */ /* * Grab the 8390 specific header. Similar to the block_input routine, but * we don't need to be concerned with ring wrap as the header will be at * the start of a page, so we optimize accordingly. (A single doubleword.) */ static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8); memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ } /* * Block input and output are easy on shared memory ethercards, the only * complication is when the ring buffer wraps. The count will already * be rounded up to a doubleword value via es_get_8390_hdr() above. */ static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256; if (ring_offset + count > ES_STOP_PG*256) { /* Packet wraps over end of ring buffer. */ int semi_count = ES_STOP_PG*256 - ring_offset; memcpy_fromio(skb->data, xfer_start, semi_count); count -= semi_count; memcpy_fromio(skb->data + semi_count, ei_status.mem, count); } else { /* Packet is in one chunk. 
*/ memcpy_fromio(skb->data, xfer_start, count); } } static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8); count = (count + 3) & ~3; /* Round up to doubleword */ memcpy_toio(shmem, buf, count); } #ifdef MODULE #define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */ #define NAMELEN 8 /* # of chars for storing dev->name */ static struct net_device *dev_es3210[MAX_ES_CARDS]; static int io[MAX_ES_CARDS]; static int irq[MAX_ES_CARDS]; static int mem[MAX_ES_CARDS]; module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(mem, int, NULL, 0); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s)"); MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); MODULE_LICENSE("GPL"); int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { if (io[this_dev] == 0 && this_dev != 0) break; dev = alloc_ei_netdev(); if (!dev) break; dev->irq = irq[this_dev]; dev->base_addr = io[this_dev]; dev->mem_start = mem[this_dev]; if (do_es_probe(dev) == 0) { dev_es3210[found++] = dev; continue; } free_netdev(dev); printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]); break; } if (found) return 0; return -ENXIO; } static void cleanup_card(struct net_device *dev) { free_irq(dev->irq, dev); release_region(dev->base_addr, ES_IO_EXTENT); iounmap(ei_status.mem); } void __exit cleanup_module(void) { int this_dev; for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) { struct net_device *dev = dev_es3210[this_dev]; if (dev) { unregister_netdev(dev); cleanup_card(dev); free_netdev(dev); } } } #endif /* MODULE */
gpl-2.0
zlaja/android_kernel_lge_msm8610
net/ax25/ax25_ds_subr.c
5083
5217
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/spinlock.h> #include <linux/net.h> #include <linux/gfp.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> void ax25_ds_nr_error_recovery(ax25_cb *ax25) { ax25_ds_establish_data_link(ax25); } /* * dl1bke 960114: transmit I frames on DAMA poll */ void ax25_ds_enquiry_response(ax25_cb *ax25) { ax25_cb *ax25o; struct hlist_node *node; /* Please note that neither DK4EG's nor DG2FEF's * DAMA spec mention the following behaviour as seen * with TheFirmware: * * DB0ACH->DL1BKE <RR C P R0> [DAMA] * DL1BKE->DB0ACH <I NR=0 NS=0> * DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5> * DL1BKE->DB0ACH <RR R F R0> * * The Flexnet DAMA Master implementation apparently * insists on the "proper" AX.25 behaviour: * * DB0ACH->DL1BKE <RR C P R0> [DAMA] * DL1BKE->DB0ACH <RR R F R0> * DL1BKE->DB0ACH <I NR=0 NS=0> * DL1BKE-7->DB0PRA-6 DB0ACH <I C S3 R5> * * Flexnet refuses to send us *any* I frame if we send * a REJ in case AX25_COND_REJECT is set. It is superfluous in * this mode anyway (a RR or RNR invokes the retransmission). * Is this a Flexnet bug? 
*/ ax25_std_enquiry_response(ax25); if (!(ax25->condition & AX25_COND_PEER_RX_BUSY)) { ax25_requeue_frames(ax25); ax25_kick(ax25); } if (ax25->state == AX25_STATE_1 || ax25->state == AX25_STATE_2 || skb_peek(&ax25->ack_queue) != NULL) ax25_ds_t1_timeout(ax25); else ax25->n2count = 0; ax25_start_t3timer(ax25); ax25_ds_set_timer(ax25->ax25_dev); spin_lock(&ax25_list_lock); ax25_for_each(ax25o, node, &ax25_list) { if (ax25o == ax25) continue; if (ax25o->ax25_dev != ax25->ax25_dev) continue; if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2) { ax25_ds_t1_timeout(ax25o); continue; } if (!(ax25o->condition & AX25_COND_PEER_RX_BUSY) && ax25o->state == AX25_STATE_3) { ax25_requeue_frames(ax25o); ax25_kick(ax25o); } if (ax25o->state == AX25_STATE_1 || ax25o->state == AX25_STATE_2 || skb_peek(&ax25o->ack_queue) != NULL) ax25_ds_t1_timeout(ax25o); /* do not start T3 for listening sockets (tnx DD8NE) */ if (ax25o->state != AX25_STATE_0) ax25_start_t3timer(ax25o); } spin_unlock(&ax25_list_lock); } void ax25_ds_establish_data_link(ax25_cb *ax25) { ax25->condition &= AX25_COND_DAMA_MODE; ax25->n2count = 0; ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_start_t3timer(ax25); } /* * :::FIXME::: * This is a kludge. Not all drivers recognize kiss commands. * We need a driver level request to switch duplex mode, that does * either SCC changing, PI config or KISS as required. Currently * this request isn't reliable. 
*/ static void ax25_kiss_cmd(ax25_dev *ax25_dev, unsigned char cmd, unsigned char param) { struct sk_buff *skb; unsigned char *p; if (ax25_dev->dev == NULL) return; if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL) return; skb_reset_network_header(skb); p = skb_put(skb, 2); *p++ = cmd; *p++ = param; skb->protocol = ax25_type_trans(skb, ax25_dev->dev); dev_queue_xmit(skb); } /* * A nasty problem arises if we count the number of DAMA connections * wrong, especially when connections on the device already existed * and our network node (or the sysop) decides to turn on DAMA Master * mode. We thus flag the 'real' slave connections with * ax25->dama_slave=1 and look on every disconnect if still slave * connections exist. */ static int ax25_check_dama_slave(ax25_dev *ax25_dev) { ax25_cb *ax25; int res = 0; struct hlist_node *node; spin_lock(&ax25_list_lock); ax25_for_each(ax25, node, &ax25_list) if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) { res = 1; break; } spin_unlock(&ax25_list_lock); return res; } static void ax25_dev_dama_on(ax25_dev *ax25_dev) { if (ax25_dev == NULL) return; if (ax25_dev->dama.slave == 0) ax25_kiss_cmd(ax25_dev, 5, 1); ax25_dev->dama.slave = 1; ax25_ds_set_timer(ax25_dev); } void ax25_dev_dama_off(ax25_dev *ax25_dev) { if (ax25_dev == NULL) return; if (ax25_dev->dama.slave && !ax25_check_dama_slave(ax25_dev)) { ax25_kiss_cmd(ax25_dev, 5, 0); ax25_dev->dama.slave = 0; ax25_ds_del_timer(ax25_dev); } } void ax25_dama_on(ax25_cb *ax25) { ax25_dev_dama_on(ax25->ax25_dev); ax25->condition |= AX25_COND_DAMA_MODE; } void ax25_dama_off(ax25_cb *ax25) { ax25->condition &= ~AX25_COND_DAMA_MODE; ax25_dev_dama_off(ax25->ax25_dev); }
gpl-2.0
omnirom/android_kernel_motorola_msm8226
drivers/rtc/rtc-pl030.c
7387
3980
/* * linux/drivers/rtc/rtc-pl030.c * * Copyright (C) 2000-2001 Deep Blue Solutions Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/amba/bus.h> #include <linux/io.h> #include <linux/slab.h> #define RTC_DR (0) #define RTC_MR (4) #define RTC_STAT (8) #define RTC_EOI (8) #define RTC_LR (12) #define RTC_CR (16) #define RTC_CR_MIE (1 << 0) struct pl030_rtc { struct rtc_device *rtc; void __iomem *base; }; static irqreturn_t pl030_interrupt(int irq, void *dev_id) { struct pl030_rtc *rtc = dev_id; writel(0, rtc->base + RTC_EOI); return IRQ_HANDLED; } static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); rtc_time_to_tm(readl(rtc->base + RTC_MR), &alrm->time); return 0; } static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); unsigned long time; int ret; /* * At the moment, we can only deal with non-wildcarded alarm times. */ ret = rtc_valid_tm(&alrm->time); if (ret == 0) ret = rtc_tm_to_time(&alrm->time, &time); if (ret == 0) writel(time, rtc->base + RTC_MR); return ret; } static int pl030_read_time(struct device *dev, struct rtc_time *tm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); rtc_time_to_tm(readl(rtc->base + RTC_DR), tm); return 0; } /* * Set the RTC time. Unfortunately, we can't accurately set * the point at which the counter updates. * * Also, since RTC_LR is transferred to RTC_CR on next rising * edge of the 1Hz clock, we must write the time one second * in advance. 
*/ static int pl030_set_time(struct device *dev, struct rtc_time *tm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); unsigned long time; int ret; ret = rtc_tm_to_time(tm, &time); if (ret == 0) writel(time + 1, rtc->base + RTC_LR); return ret; } static const struct rtc_class_ops pl030_ops = { .read_time = pl030_read_time, .set_time = pl030_set_time, .read_alarm = pl030_read_alarm, .set_alarm = pl030_set_alarm, }; static int pl030_probe(struct amba_device *dev, const struct amba_id *id) { struct pl030_rtc *rtc; int ret; ret = amba_request_regions(dev, NULL); if (ret) goto err_req; rtc = kmalloc(sizeof(*rtc), GFP_KERNEL); if (!rtc) { ret = -ENOMEM; goto err_rtc; } rtc->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!rtc->base) { ret = -ENOMEM; goto err_map; } __raw_writel(0, rtc->base + RTC_CR); __raw_writel(0, rtc->base + RTC_EOI); amba_set_drvdata(dev, rtc); ret = request_irq(dev->irq[0], pl030_interrupt, 0, "rtc-pl030", rtc); if (ret) goto err_irq; rtc->rtc = rtc_device_register("pl030", &dev->dev, &pl030_ops, THIS_MODULE); if (IS_ERR(rtc->rtc)) { ret = PTR_ERR(rtc->rtc); goto err_reg; } return 0; err_reg: free_irq(dev->irq[0], rtc); err_irq: iounmap(rtc->base); err_map: kfree(rtc); err_rtc: amba_release_regions(dev); err_req: return ret; } static int pl030_remove(struct amba_device *dev) { struct pl030_rtc *rtc = amba_get_drvdata(dev); amba_set_drvdata(dev, NULL); writel(0, rtc->base + RTC_CR); free_irq(dev->irq[0], rtc); rtc_device_unregister(rtc->rtc); iounmap(rtc->base); kfree(rtc); amba_release_regions(dev); return 0; } static struct amba_id pl030_ids[] = { { .id = 0x00041030, .mask = 0x000fffff, }, { 0, 0 }, }; MODULE_DEVICE_TABLE(amba, pl030_ids); static struct amba_driver pl030_driver = { .drv = { .name = "rtc-pl030", }, .probe = pl030_probe, .remove = pl030_remove, .id_table = pl030_ids, }; module_amba_driver(pl030_driver); MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); MODULE_DESCRIPTION("ARM AMBA PL030 RTC Driver"); 
MODULE_LICENSE("GPL");
gpl-2.0
XXMrHyde/android_kernel_lge_hammerhead
fs/logfs/dev_bdev.c
7899
8617
/* * fs/logfs/dev_bdev.c - Device access methods for block devices * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> */ #include "logfs.h" #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/gfp.h> #include <linux/prefetch.h> #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) static void request_complete(struct bio *bio, int err) { complete((struct completion *)bio->bi_private); } static int sync_request(struct page *page, struct block_device *bdev, int rw) { struct bio bio; struct bio_vec bio_vec; struct completion complete; bio_init(&bio); bio.bi_io_vec = &bio_vec; bio_vec.bv_page = page; bio_vec.bv_len = PAGE_SIZE; bio_vec.bv_offset = 0; bio.bi_vcnt = 1; bio.bi_idx = 0; bio.bi_size = PAGE_SIZE; bio.bi_bdev = bdev; bio.bi_sector = page->index * (PAGE_SIZE >> 9); init_completion(&complete); bio.bi_private = &complete; bio.bi_end_io = request_complete; submit_bio(rw, &bio); wait_for_completion(&complete); return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 
0 : -EIO; } static int bdev_readpage(void *_sb, struct page *page) { struct super_block *sb = _sb; struct block_device *bdev = logfs_super(sb)->s_bdev; int err; err = sync_request(page, bdev, READ); if (err) { ClearPageUptodate(page); SetPageError(page); } else { SetPageUptodate(page); ClearPageError(page); } unlock_page(page); return err; } static DECLARE_WAIT_QUEUE_HEAD(wq); static void writeseg_end_io(struct bio *bio, int err) { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct super_block *sb = bio->bi_private; struct logfs_super *super = logfs_super(sb); struct page *page; BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */ BUG_ON(err); BUG_ON(bio->bi_vcnt == 0); do { page = bvec->bv_page; if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); end_page_writeback(page); page_cache_release(page); } while (bvec >= bio->bi_io_vec); bio_put(bio); if (atomic_dec_and_test(&super->s_pending_writes)) wake_up(&wq); } static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, size_t nr_pages) { struct logfs_super *super = logfs_super(sb); struct address_space *mapping = super->s_mapping_inode->i_mapping; struct bio *bio; struct page *page; struct request_queue *q = bdev_get_queue(sb->s_bdev); unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); int i; if (max_pages > BIO_MAX_PAGES) max_pages = BIO_MAX_PAGES; bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); for (i = 0; i < nr_pages; i++) { if (i >= max_pages) { /* Block layer cannot split bios :( */ bio->bi_vcnt = i; bio->bi_idx = 0; bio->bi_size = i * PAGE_SIZE; bio->bi_bdev = super->s_bdev; bio->bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = writeseg_end_io; atomic_inc(&super->s_pending_writes); submit_bio(WRITE, bio); ofs += i * PAGE_SIZE; index += i; nr_pages -= i; i = 0; bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); } page = find_lock_page(mapping, index + i); 
BUG_ON(!page); bio->bi_io_vec[i].bv_page = page; bio->bi_io_vec[i].bv_len = PAGE_SIZE; bio->bi_io_vec[i].bv_offset = 0; BUG_ON(PageWriteback(page)); set_page_writeback(page); unlock_page(page); } bio->bi_vcnt = nr_pages; bio->bi_idx = 0; bio->bi_size = nr_pages * PAGE_SIZE; bio->bi_bdev = super->s_bdev; bio->bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = writeseg_end_io; atomic_inc(&super->s_pending_writes); submit_bio(WRITE, bio); return 0; } static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len) { struct logfs_super *super = logfs_super(sb); int head; BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO); if (len == 0) { /* This can happen when the object fit perfectly into a * segment, the segment gets written per sync and subsequently * closed. */ return; } head = ofs & (PAGE_SIZE - 1); if (head) { ofs -= head; len += head; } len = PAGE_ALIGN(len); __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT); } static void erase_end_io(struct bio *bio, int err) { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct super_block *sb = bio->bi_private; struct logfs_super *super = logfs_super(sb); BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */ BUG_ON(err); BUG_ON(bio->bi_vcnt == 0); bio_put(bio); if (atomic_dec_and_test(&super->s_pending_writes)) wake_up(&wq); } static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, size_t nr_pages) { struct logfs_super *super = logfs_super(sb); struct bio *bio; struct request_queue *q = bdev_get_queue(sb->s_bdev); unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); int i; if (max_pages > BIO_MAX_PAGES) max_pages = BIO_MAX_PAGES; bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); for (i = 0; i < nr_pages; i++) { if (i >= max_pages) { /* Block layer cannot split bios :( */ bio->bi_vcnt = i; bio->bi_idx = 0; bio->bi_size = i * PAGE_SIZE; bio->bi_bdev = super->s_bdev; bio->bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = erase_end_io; 
atomic_inc(&super->s_pending_writes); submit_bio(WRITE, bio); ofs += i * PAGE_SIZE; index += i; nr_pages -= i; i = 0; bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); } bio->bi_io_vec[i].bv_page = super->s_erase_page; bio->bi_io_vec[i].bv_len = PAGE_SIZE; bio->bi_io_vec[i].bv_offset = 0; } bio->bi_vcnt = nr_pages; bio->bi_idx = 0; bio->bi_size = nr_pages * PAGE_SIZE; bio->bi_bdev = super->s_bdev; bio->bi_sector = ofs >> 9; bio->bi_private = sb; bio->bi_end_io = erase_end_io; atomic_inc(&super->s_pending_writes); submit_bio(WRITE, bio); return 0; } static int bdev_erase(struct super_block *sb, loff_t to, size_t len, int ensure_write) { struct logfs_super *super = logfs_super(sb); BUG_ON(to & (PAGE_SIZE - 1)); BUG_ON(len & (PAGE_SIZE - 1)); if (super->s_flags & LOGFS_SB_FLAG_RO) return -EROFS; if (ensure_write) { /* * Object store doesn't care whether erases happen or not. * But for the journal they are required. Otherwise a scan * can find an old commit entry and assume it is the current * one, travelling back in time. 
*/ do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT); } return 0; } static void bdev_sync(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); wait_event(wq, atomic_read(&super->s_pending_writes) == 0); } static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs) { struct logfs_super *super = logfs_super(sb); struct address_space *mapping = super->s_mapping_inode->i_mapping; filler_t *filler = bdev_readpage; *ofs = 0; return read_cache_page(mapping, 0, filler, sb); } static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs) { struct logfs_super *super = logfs_super(sb); struct address_space *mapping = super->s_mapping_inode->i_mapping; filler_t *filler = bdev_readpage; u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000; pgoff_t index = pos >> PAGE_SHIFT; *ofs = pos; return read_cache_page(mapping, index, filler, sb); } static int bdev_write_sb(struct super_block *sb, struct page *page) { struct block_device *bdev = logfs_super(sb)->s_bdev; /* Nothing special to do for block devices. 
*/ return sync_request(page, bdev, WRITE); } static void bdev_put_device(struct logfs_super *s) { blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int bdev_can_write_buf(struct super_block *sb, u64 ofs) { return 0; } static const struct logfs_device_ops bd_devops = { .find_first_sb = bdev_find_first_sb, .find_last_sb = bdev_find_last_sb, .write_sb = bdev_write_sb, .readpage = bdev_readpage, .writeseg = bdev_writeseg, .erase = bdev_erase, .can_write_buf = bdev_can_write_buf, .sync = bdev_sync, .put_device = bdev_put_device, }; int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type, const char *devname) { struct block_device *bdev; bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL, type); if (IS_ERR(bdev)) return PTR_ERR(bdev); if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { int mtdnr = MINOR(bdev->bd_dev); blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); return logfs_get_sb_mtd(p, mtdnr); } p->s_bdev = bdev; p->s_mtd = NULL; p->s_devops = &bd_devops; return 0; }
gpl-2.0
daeiron/LGD855_kernel
arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c
9435
4759
/* * Sets up interrupt handlers for various hardware switches which are * connected to interrupt lines. * * Copyright 2005-2207 PMC-Sierra, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <msp_int.h> #include <msp_regs.h> #include <msp_regops.h> /* For hwbutton_interrupt->initial_state */ #define HWBUTTON_HI 0x1 #define HWBUTTON_LO 0x2 /* * This struct describes a hardware button */ struct hwbutton_interrupt { char *name; /* Name of button */ int irq; /* Actual LINUX IRQ */ int eirq; /* Extended IRQ number (0-7) */ int initial_state; /* The "normal" state of the switch */ void (*handle_hi)(void *); /* Handler: switch input has gone HI */ void (*handle_lo)(void *); /* Handler: switch input has gone LO */ void *data; /* Optional data to pass to handler */ }; #ifdef CONFIG_PMC_MSP7120_GW extern void msp_restart(char *); static void softreset_push(void *data) { printk(KERN_WARNING "SOFTRESET switch was pushed\n"); /* * In the future you could move this to the release handler, * timing the difference between the 'push' and 'release', and only * doing this ungraceful restart if the button has been down for * a certain amount of time; otherwise doing a graceful restart. 
*/ msp_restart(NULL); } static void softreset_release(void *data) { printk(KERN_WARNING "SOFTRESET switch was released\n"); /* Do nothing */ } static void standby_on(void *data) { printk(KERN_WARNING "STANDBY switch was set to ON (not implemented)\n"); /* TODO: Put board in standby mode */ } static void standby_off(void *data) { printk(KERN_WARNING "STANDBY switch was set to OFF (not implemented)\n"); /* TODO: Take out of standby mode */ } static struct hwbutton_interrupt softreset_sw = { .name = "Softreset button", .irq = MSP_INT_EXT0, .eirq = 0, .initial_state = HWBUTTON_HI, .handle_hi = softreset_release, .handle_lo = softreset_push, .data = NULL, }; static struct hwbutton_interrupt standby_sw = { .name = "Standby switch", .irq = MSP_INT_EXT1, .eirq = 1, .initial_state = HWBUTTON_HI, .handle_hi = standby_off, .handle_lo = standby_on, .data = NULL, }; #endif /* CONFIG_PMC_MSP7120_GW */ static irqreturn_t hwbutton_handler(int irq, void *data) { struct hwbutton_interrupt *hirq = data; unsigned long cic_ext = *CIC_EXT_CFG_REG; if (CIC_EXT_IS_ACTIVE_HI(cic_ext, hirq->eirq)) { /* Interrupt: pin is now HI */ CIC_EXT_SET_ACTIVE_LO(cic_ext, hirq->eirq); hirq->handle_hi(hirq->data); } else { /* Interrupt: pin is now LO */ CIC_EXT_SET_ACTIVE_HI(cic_ext, hirq->eirq); hirq->handle_lo(hirq->data); } /* * Invert the POLARITY of this level interrupt to ack the interrupt * Thus next state change will invoke the opposite message */ *CIC_EXT_CFG_REG = cic_ext; return IRQ_HANDLED; } static int msp_hwbutton_register(struct hwbutton_interrupt *hirq) { unsigned long cic_ext; if (hirq->handle_hi == NULL || hirq->handle_lo == NULL) return -EINVAL; cic_ext = *CIC_EXT_CFG_REG; CIC_EXT_SET_TRIGGER_LEVEL(cic_ext, hirq->eirq); if (hirq->initial_state == HWBUTTON_HI) CIC_EXT_SET_ACTIVE_LO(cic_ext, hirq->eirq); else CIC_EXT_SET_ACTIVE_HI(cic_ext, hirq->eirq); *CIC_EXT_CFG_REG = cic_ext; return request_irq(hirq->irq, hwbutton_handler, 0, hirq->name, hirq); } static int __init 
msp_hwbutton_setup(void) { #ifdef CONFIG_PMC_MSP7120_GW msp_hwbutton_register(&softreset_sw); msp_hwbutton_register(&standby_sw); #endif return 0; } subsys_initcall(msp_hwbutton_setup);
gpl-2.0
spica234/LiteGX
drivers/video/kyro/STG4000VTG.c
15579
4649
/* * linux/drivers/video/kyro/STG4000VTG.c * * Copyright (C) 2002 STMicroelectronics * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/types.h> #include <video/kyro.h> #include "STG4000Reg.h" #include "STG4000Interface.h" void DisableVGA(volatile STG4000REG __iomem *pSTGReg) { u32 tmp; volatile u32 count = 0, i; /* Reset the VGA registers */ tmp = STG_READ_REG(SoftwareReset); CLEAR_BIT(8); STG_WRITE_REG(SoftwareReset, tmp); /* Just for Delay */ for (i = 0; i < 1000; i++) { count++; } /* Pull-out the VGA registers from reset */ tmp = STG_READ_REG(SoftwareReset); tmp |= SET_BIT(8); STG_WRITE_REG(SoftwareReset, tmp); } void StopVTG(volatile STG4000REG __iomem *pSTGReg) { u32 tmp = 0; /* Stop Ver and Hor Sync Generator */ tmp = (STG_READ_REG(DACSyncCtrl)) | SET_BIT(0) | SET_BIT(2); CLEAR_BIT(31); STG_WRITE_REG(DACSyncCtrl, tmp); } void StartVTG(volatile STG4000REG __iomem *pSTGReg) { u32 tmp = 0; /* Start Ver and Hor Sync Generator */ tmp = ((STG_READ_REG(DACSyncCtrl)) | SET_BIT(31)); CLEAR_BIT(0); CLEAR_BIT(2); STG_WRITE_REG(DACSyncCtrl, tmp); } void SetupVTG(volatile STG4000REG __iomem *pSTGReg, const struct kyrofb_info * pTiming) { u32 tmp = 0; u32 margins = 0; u32 ulBorder; u32 xRes = pTiming->XRES; u32 yRes = pTiming->YRES; /* Horizontal */ u32 HAddrTime, HRightBorder, HLeftBorder; u32 HBackPorcStrt, HFrontPorchStrt, HTotal, HLeftBorderStrt, HRightBorderStrt, HDisplayStrt; /* Vertical */ u32 VDisplayStrt, VBottomBorder, VTopBorder; u32 VBackPorchStrt, VTotal, VTopBorderStrt, VFrontPorchStrt, VBottomBorderStrt, VAddrTime; /* Need to calculate the right border */ if ((xRes == 640) && (yRes == 480)) { if ((pTiming->VFREQ == 60) || (pTiming->VFREQ == 72)) { margins = 8; } } /* Work out the Border */ ulBorder = (pTiming->HTot - (pTiming->HST + (pTiming->HBP - margins) + xRes + (pTiming->HFP - margins))) >> 1; /* Border the same for 
Vertical and Horizontal */ VBottomBorder = HLeftBorder = VTopBorder = HRightBorder = ulBorder; /************ Get Timing values for Horizontal ******************/ HAddrTime = xRes; HBackPorcStrt = pTiming->HST; HTotal = pTiming->HTot; HDisplayStrt = pTiming->HST + (pTiming->HBP - margins) + HLeftBorder; HLeftBorderStrt = HDisplayStrt - HLeftBorder; HFrontPorchStrt = pTiming->HST + (pTiming->HBP - margins) + HLeftBorder + HAddrTime + HRightBorder; HRightBorderStrt = HFrontPorchStrt - HRightBorder; /************ Get Timing values for Vertical ******************/ VAddrTime = yRes; VBackPorchStrt = pTiming->VST; VTotal = pTiming->VTot; VDisplayStrt = pTiming->VST + (pTiming->VBP - margins) + VTopBorder; VTopBorderStrt = VDisplayStrt - VTopBorder; VFrontPorchStrt = pTiming->VST + (pTiming->VBP - margins) + VTopBorder + VAddrTime + VBottomBorder; VBottomBorderStrt = VFrontPorchStrt - VBottomBorder; /* Set Hor Timing 1, 2, 3 */ tmp = STG_READ_REG(DACHorTim1); CLEAR_BITS_FRM_TO(0, 11); CLEAR_BITS_FRM_TO(16, 27); tmp |= (HTotal) | (HBackPorcStrt << 16); STG_WRITE_REG(DACHorTim1, tmp); tmp = STG_READ_REG(DACHorTim2); CLEAR_BITS_FRM_TO(0, 11); CLEAR_BITS_FRM_TO(16, 27); tmp |= (HDisplayStrt << 16) | HLeftBorderStrt; STG_WRITE_REG(DACHorTim2, tmp); tmp = STG_READ_REG(DACHorTim3); CLEAR_BITS_FRM_TO(0, 11); CLEAR_BITS_FRM_TO(16, 27); tmp |= (HFrontPorchStrt << 16) | HRightBorderStrt; STG_WRITE_REG(DACHorTim3, tmp); /* Set Ver Timing 1, 2, 3 */ tmp = STG_READ_REG(DACVerTim1); CLEAR_BITS_FRM_TO(0, 11); CLEAR_BITS_FRM_TO(16, 27); tmp |= (VBackPorchStrt << 16) | (VTotal); STG_WRITE_REG(DACVerTim1, tmp); tmp = STG_READ_REG(DACVerTim2); CLEAR_BITS_FRM_TO(0, 11); CLEAR_BITS_FRM_TO(16, 27); tmp |= (VDisplayStrt << 16) | VTopBorderStrt; STG_WRITE_REG(DACVerTim2, tmp); tmp = STG_READ_REG(DACVerTim3); CLEAR_BITS_FRM_TO(0, 11); CLEAR_BITS_FRM_TO(16, 27); tmp |= (VFrontPorchStrt << 16) | VBottomBorderStrt; STG_WRITE_REG(DACVerTim3, tmp); /* Set Verical and Horizontal Polarity */ tmp = 
STG_READ_REG(DACSyncCtrl) | SET_BIT(3) | SET_BIT(1); if ((pTiming->HSP > 0) && (pTiming->VSP < 0)) { /* +hsync -vsync */ tmp &= ~0x8; } else if ((pTiming->HSP < 0) && (pTiming->VSP > 0)) { /* -hsync +vsync */ tmp &= ~0x2; } else if ((pTiming->HSP < 0) && (pTiming->VSP < 0)) { /* -hsync -vsync */ tmp &= ~0xA; } else if ((pTiming->HSP > 0) && (pTiming->VSP > 0)) { /* +hsync -vsync */ tmp &= ~0x0; } STG_WRITE_REG(DACSyncCtrl, tmp); }
gpl-2.0
dovydasvenckus/linux
drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
220
2959
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * Based on the r8180 driver, which is: * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al. * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
* * Contact Information: * wlanfae <wlanfae@realtek.com> *****************************************************************************/ #include "rtl_pci.h" #include "rtl_core.h" static void rtl8192_parse_pci_configuration(struct pci_dev *pdev, struct net_device *dev) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); u8 tmp; u16 LinkCtrlReg; pcie_capability_read_word(priv->pdev, PCI_EXP_LNKCTL, &LinkCtrlReg); RT_TRACE(COMP_INIT, "Link Control Register =%x\n", LinkCtrlReg); pci_read_config_byte(pdev, 0x98, &tmp); tmp |= BIT4; pci_write_config_byte(pdev, 0x98, tmp); tmp = 0x17; pci_write_config_byte(pdev, 0x70f, tmp); } bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev) { struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev); u16 VenderID; u16 DeviceID; u8 RevisionID; u16 IrqLine; VenderID = pdev->vendor; DeviceID = pdev->device; RevisionID = pdev->revision; pci_read_config_word(pdev, 0x3C, &IrqLine); priv->card_8192 = priv->ops->nic_type; if (DeviceID == 0x8192) { switch (RevisionID) { case HAL_HW_PCI_REVISION_ID_8192PCIE: dev_info(&pdev->dev, "Adapter(8192 PCI-E) is found - DeviceID=%x\n", DeviceID); priv->card_8192 = NIC_8192E; break; case HAL_HW_PCI_REVISION_ID_8192SE: dev_info(&pdev->dev, "Adapter(8192SE) is found - DeviceID=%x\n", DeviceID); priv->card_8192 = NIC_8192SE; break; default: dev_info(&pdev->dev, "UNKNOWN nic type(%4x:%4x)\n", pdev->vendor, pdev->device); priv->card_8192 = NIC_UNKNOWN; return false; } } if (priv->ops->nic_type != priv->card_8192) { dev_info(&pdev->dev, "Detect info(%x) and hardware info(%x) not match!\n", priv->ops->nic_type, priv->card_8192); dev_info(&pdev->dev, "Please select proper driver before install!!!!\n"); return false; } rtl8192_parse_pci_configuration(pdev, dev); return true; }
gpl-2.0
dabaol/linux
drivers/staging/rtl8723au/core/rtw_recv.c
220
63928
/****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * ******************************************************************************/ #define _RTW_RECV_C_ #include <osdep_service.h> #include <drv_types.h> #include <recv_osdep.h> #include <mlme_osdep.h> #include <linux/ip.h> #include <linux/if_ether.h> #include <usb_ops.h> #include <linux/ieee80211.h> #include <wifi.h> #include <rtl8723a_recv.h> #include <rtl8723a_xmit.h> void rtw_signal_stat_timer_hdl23a(unsigned long data); void _rtw_init_sta_recv_priv23a(struct sta_recv_priv *psta_recvpriv) { spin_lock_init(&psta_recvpriv->lock); /* for (i = 0; i<MAX_RX_NUMBLKS; i++) */ /* _rtw_init_queue23a(&psta_recvpriv->blk_strms[i]); */ _rtw_init_queue23a(&psta_recvpriv->defrag_q); } int _rtw_init_recv_priv23a(struct recv_priv *precvpriv, struct rtw_adapter *padapter) { struct recv_frame *precvframe; int i; int res = _SUCCESS; spin_lock_init(&precvpriv->lock); _rtw_init_queue23a(&precvpriv->free_recv_queue); _rtw_init_queue23a(&precvpriv->recv_pending_queue); _rtw_init_queue23a(&precvpriv->uc_swdec_pending_queue); precvpriv->adapter = padapter; for (i = 0; i < NR_RECVFRAME ; i++) { precvframe = kzalloc(sizeof(struct recv_frame), GFP_KERNEL); if (!precvframe) break; INIT_LIST_HEAD(&precvframe->list); list_add_tail(&precvframe->list, &precvpriv->free_recv_queue.queue); precvframe->adapter = padapter; precvframe++; } precvpriv->free_recvframe_cnt = i; precvpriv->rx_pending_cnt = 1; res = 
rtl8723au_init_recv_priv(padapter); setup_timer(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl23a, (unsigned long)padapter); precvpriv->signal_stat_sampling_interval = 1000; /* ms */ rtw_set_signal_stat_timer(precvpriv); return res; } void _rtw_free_recv_priv23a (struct recv_priv *precvpriv) { struct rtw_adapter *padapter = precvpriv->adapter; struct recv_frame *precvframe; struct list_head *plist, *ptmp; rtw_free_uc_swdec_pending_queue23a(padapter); list_for_each_safe(plist, ptmp, &precvpriv->free_recv_queue.queue) { precvframe = container_of(plist, struct recv_frame, list); list_del_init(&precvframe->list); kfree(precvframe); } rtl8723au_free_recv_priv(padapter); } struct recv_frame *rtw_alloc_recvframe23a(struct rtw_queue *pfree_recv_queue) { struct recv_frame *pframe; struct list_head *plist, *phead; struct rtw_adapter *padapter; struct recv_priv *precvpriv; spin_lock_bh(&pfree_recv_queue->lock); if (list_empty(&pfree_recv_queue->queue)) pframe = NULL; else { phead = get_list_head(pfree_recv_queue); plist = phead->next; pframe = container_of(plist, struct recv_frame, list); list_del_init(&pframe->list); padapter = pframe->adapter; if (padapter) { precvpriv = &padapter->recvpriv; if (pfree_recv_queue == &precvpriv->free_recv_queue) precvpriv->free_recvframe_cnt--; } } spin_unlock_bh(&pfree_recv_queue->lock); return pframe; } int rtw_free_recvframe23a(struct recv_frame *precvframe) { struct rtw_adapter *padapter = precvframe->adapter; struct recv_priv *precvpriv = &padapter->recvpriv; struct rtw_queue *pfree_recv_queue; if (precvframe->pkt) { dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */ precvframe->pkt = NULL; } pfree_recv_queue = &precvpriv->free_recv_queue; spin_lock_bh(&pfree_recv_queue->lock); list_del_init(&precvframe->list); list_add_tail(&precvframe->list, get_list_head(pfree_recv_queue)); if (padapter) { if (pfree_recv_queue == &precvpriv->free_recv_queue) precvpriv->free_recvframe_cnt++; } spin_unlock_bh(&pfree_recv_queue->lock); 
return _SUCCESS; } int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *queue) { struct rtw_adapter *padapter = precvframe->adapter; struct recv_priv *precvpriv = &padapter->recvpriv; spin_lock_bh(&queue->lock); list_del_init(&precvframe->list); list_add_tail(&precvframe->list, get_list_head(queue)); if (padapter) { if (queue == &precvpriv->free_recv_queue) precvpriv->free_recvframe_cnt++; } spin_unlock_bh(&queue->lock); return _SUCCESS; } /* caller : defrag ; recvframe_chk_defrag23a in recv_thread (passive) pframequeue: defrag_queue : will be accessed in recv_thread (passive) using spinlock to protect */ static void rtw_free_recvframe23a_queue(struct rtw_queue *pframequeue) { struct recv_frame *hdr; struct list_head *plist, *phead, *ptmp; spin_lock(&pframequeue->lock); phead = get_list_head(pframequeue); plist = phead->next; list_for_each_safe(plist, ptmp, phead) { hdr = container_of(plist, struct recv_frame, list); rtw_free_recvframe23a(hdr); } spin_unlock(&pframequeue->lock); } u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter) { u32 cnt = 0; struct recv_frame *pending_frame; while ((pending_frame = rtw_alloc_recvframe23a(&adapter->recvpriv.uc_swdec_pending_queue))) { rtw_free_recvframe23a(pending_frame); DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__); cnt++; } return cnt; } int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue) { spin_lock_bh(&queue->lock); list_del_init(&precvbuf->list); list_add(&precvbuf->list, get_list_head(queue)); spin_unlock_bh(&queue->lock); return _SUCCESS; } int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue) { unsigned long irqL; spin_lock_irqsave(&queue->lock, irqL); list_del_init(&precvbuf->list); list_add_tail(&precvbuf->list, get_list_head(queue)); spin_unlock_irqrestore(&queue->lock, irqL); return _SUCCESS; } struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue) { unsigned long irqL; struct recv_buf 
*precvbuf; struct list_head *plist, *phead; spin_lock_irqsave(&queue->lock, irqL); if (list_empty(&queue->queue)) { precvbuf = NULL; } else { phead = get_list_head(queue); plist = phead->next; precvbuf = container_of(plist, struct recv_buf, list); list_del_init(&precvbuf->list); } spin_unlock_irqrestore(&queue->lock, irqL); return precvbuf; } int recvframe_chkmic(struct rtw_adapter *adapter, struct recv_frame *precvframe); int recvframe_chkmic(struct rtw_adapter *adapter, struct recv_frame *precvframe) { int i, res = _SUCCESS; u32 datalen; u8 miccode[8]; u8 bmic_err = false, brpt_micerror = true; u8 *pframe, *payload, *pframemic; u8 *mickey; /* u8 *iv, rxdata_key_idx = 0; */ struct sta_info *stainfo; struct rx_pkt_attrib *prxattrib = &precvframe->attrib; struct security_priv *psecuritypriv = &adapter->securitypriv; struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; stainfo = rtw_get_stainfo23a(&adapter->stapriv, &prxattrib->ta[0]); if (prxattrib->encrypt == WLAN_CIPHER_SUITE_TKIP) { RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "recvframe_chkmic:prxattrib->encrypt == WLAN_CIPHER_SUITE_TKIP\n"); RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "recvframe_chkmic:da = %pM\n", prxattrib->ra); /* calculate mic code */ if (stainfo != NULL) { if (is_multicast_ether_addr(prxattrib->ra)) { mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0]; RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "recvframe_chkmic: bcmc key\n"); if (!psecuritypriv->binstallGrpkey) { res = _FAIL; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "recvframe_chkmic:didn't install group key!\n"); DBG_8723A("\n recvframe_chkmic:didn't " "install group key!!!!!!\n"); goto exit; } } else { mickey = &stainfo->dot11tkiprxmickey.skey[0]; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "recvframe_chkmic: unicast key\n"); } /* icv_len included the mic code */ datalen = precvframe->pkt->len-prxattrib-> 
hdrlen-prxattrib->iv_len-prxattrib->icv_len - 8; pframe = precvframe->pkt->data; payload = pframe + prxattrib->hdrlen + prxattrib->iv_len; RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "prxattrib->iv_len =%d prxattrib->icv_len =%d\n", prxattrib->iv_len, prxattrib->icv_len); /* care the length of the data */ rtw_seccalctkipmic23a(mickey, pframe, payload, datalen, &miccode[0], (unsigned char)prxattrib->priority); pframemic = payload + datalen; bmic_err = false; for (i = 0; i < 8; i++) { if (miccode[i] != *(pframemic + i)) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "recvframe_chkmic:miccode[%d](%02x) != *(pframemic+%d)(%02x)\n", i, miccode[i], i, *(pframemic + i)); bmic_err = true; } } if (bmic_err == true) { int i; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "*(pframemic-8)-*(pframemic-1) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", *(pframemic - 8), *(pframemic - 7), *(pframemic - 6), *(pframemic - 5), *(pframemic - 4), *(pframemic - 3), *(pframemic - 2), *(pframemic - 1)); RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "*(pframemic-16)-*(pframemic-9) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", *(pframemic - 16), *(pframemic - 15), *(pframemic - 14), *(pframemic - 13), *(pframemic - 12), *(pframemic - 11), *(pframemic - 10), *(pframemic - 9)); RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "====== demp packet (len =%d) ======\n", precvframe->pkt->len); for (i = 0; i < precvframe->pkt->len; i = i + 8) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", *(precvframe->pkt->data+i), *(precvframe->pkt->data+i+1), *(precvframe->pkt->data+i+2), *(precvframe->pkt->data+i+3), *(precvframe->pkt->data+i+4), *(precvframe->pkt->data+i+5), *(precvframe->pkt->data+i+6), *(precvframe->pkt->data+i+7)); } RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "====== demp packet end [len =%d]======\n", precvframe->pkt->len); RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "hrdlen =%d\n", prxattrib->hdrlen); 
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "ra = %pM psecuritypriv->binstallGrpkey =%d\n", prxattrib->ra, psecuritypriv->binstallGrpkey); /* double check key_index for some timing issue, cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */ if ((is_multicast_ether_addr(prxattrib->ra)) && (prxattrib->key_index != pmlmeinfo->key_index)) brpt_micerror = false; if ((prxattrib->bdecrypted == true) && (brpt_micerror == true)) { rtw_handle_tkip_mic_err23a(adapter, (u8)is_multicast_ether_addr(prxattrib->ra)); RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "mic error :prxattrib->bdecrypted =%d\n", prxattrib->bdecrypted); DBG_8723A(" mic error :prxattrib->" "bdecrypted =%d\n", prxattrib->bdecrypted); } else { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "mic error :prxattrib->bdecrypted =%d\n", prxattrib->bdecrypted); DBG_8723A(" mic error :prxattrib->" "bdecrypted =%d\n", prxattrib->bdecrypted); } res = _FAIL; } else { /* mic checked ok */ if (!psecuritypriv->bcheck_grpkey && is_multicast_ether_addr(prxattrib->ra)) { psecuritypriv->bcheck_grpkey = 1; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "psecuritypriv->bcheck_grpkey = true\n"); } } } else { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "recvframe_chkmic: rtw_get_stainfo23a ==NULL!!!\n"); } skb_trim(precvframe->pkt, precvframe->pkt->len - 8); } exit: return res; } /* decrypt and set the ivlen, icvlen of the recv_frame */ struct recv_frame *decryptor(struct rtw_adapter *padapter, struct recv_frame *precv_frame); struct recv_frame *decryptor(struct rtw_adapter *padapter, struct recv_frame *precv_frame) { struct rx_pkt_attrib *prxattrib = &precv_frame->attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; struct recv_frame *return_packet = precv_frame; int res = _SUCCESS; RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "prxstat->decrypted =%x prxattrib->encrypt = 0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt); if (prxattrib->encrypt > 0) { u8 *iv = precv_frame->pkt->data + 
prxattrib->hdrlen; prxattrib->key_index = (((iv[3]) >> 6) & 0x3); if (prxattrib->key_index > WEP_KEYS) { DBG_8723A("prxattrib->key_index(%d) > WEP_KEYS\n", prxattrib->key_index); switch (prxattrib->encrypt) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: prxattrib->key_index = psecuritypriv->dot11PrivacyKeyIndex; break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: default: prxattrib->key_index = psecuritypriv->dot118021XGrpKeyid; break; } } } if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0))) { psecuritypriv->hw_decrypted = 0; switch (prxattrib->encrypt) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: rtw_wep_decrypt23a(padapter, precv_frame); break; case WLAN_CIPHER_SUITE_TKIP: res = rtw_tkip_decrypt23a(padapter, precv_frame); break; case WLAN_CIPHER_SUITE_CCMP: res = rtw_aes_decrypt23a(padapter, precv_frame); break; default: break; } } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 && (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != WLAN_CIPHER_SUITE_TKIP)) { psecuritypriv->hw_decrypted = 1; } if (res == _FAIL) { rtw_free_recvframe23a(return_packet); return_packet = NULL; } return return_packet; } /* set the security information in the recv_frame */ static struct recv_frame *portctrl(struct rtw_adapter *adapter, struct recv_frame *precv_frame) { u8 *psta_addr, *ptr; uint auth_alg; struct recv_frame *pfhdr; struct sta_info *psta; struct sta_priv *pstapriv ; struct recv_frame *prtnframe; u16 ether_type; u16 eapol_type = ETH_P_PAE;/* for Funia BD's WPA issue */ struct rx_pkt_attrib *pattrib; pstapriv = &adapter->stapriv; auth_alg = adapter->securitypriv.dot11AuthAlgrthm; pfhdr = precv_frame; pattrib = &pfhdr->attrib; psta_addr = pattrib->ta; psta = rtw_get_stainfo23a(pstapriv, psta_addr); RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "########portctrl:adapter->securitypriv.dot11AuthAlgrthm =%d\n", adapter->securitypriv.dot11AuthAlgrthm); prtnframe = precv_frame; if (auth_alg == 
dot11AuthAlgrthm_8021X) {
	/* get ether_type */
	ptr = pfhdr->pkt->data + pfhdr->attrib.hdrlen;
	ether_type = (ptr[6] << 8) | ptr[7];

	if (psta && psta->ieee8021x_blocked) {
		/* blocked */
		/* only accept EAPOL frame */
		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
			 "########portctrl:psta->ieee8021x_blocked ==1\n");

		if (ether_type != eapol_type) {
			/* free this frame */
			rtw_free_recvframe23a(precv_frame);
			prtnframe = NULL;
		}
	}
}

return prtnframe;
}

/*
 * Drop duplicate frames using the per-TID sequence/fragment cache.
 * Returns _FAIL for an out-of-range TID or a repeated seq_ctrl, _SUCCESS
 * otherwise (and records the new seq_ctrl in the cache).
 */
int recv_decache(struct recv_frame *precv_frame, u8 bretry,
		 struct stainfo_rxcache *prxcache);
int recv_decache(struct recv_frame *precv_frame, u8 bretry,
		 struct stainfo_rxcache *prxcache)
{
	int tid = precv_frame->attrib.priority;
	/* seq_ctrl layout matches the on-air field: seq_num<<4 | frag_num */
	u16 seq_ctrl = ((precv_frame->attrib.seq_num & 0xffff) << 4) |
		(precv_frame->attrib.frag_num & 0xf);

	if (tid > 15) {
		RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
			 "recv_decache, (tid>15)! seq_ctrl = 0x%x, tid = 0x%x\n",
			 seq_ctrl, tid);
		return _FAIL;
	}

	if (1) { /* if (bretry) */
		if (seq_ctrl == prxcache->tid_rxseq[tid]) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
				 "recv_decache, seq_ctrl = 0x%x, tid = 0x%x, tid_rxseq = 0x%x\n",
				 seq_ctrl, tid, prxcache->tid_rxseq[tid]);
			return _FAIL;
		}
	}

	prxcache->tid_rxseq[tid] = seq_ctrl;

	return _SUCCESS;
}

/*
 * Track the peer STA's power-management bit (AP mode only): stop queuing
 * to a station that entered power save, resume transmission when it wakes.
 */
void process23a_pwrbit_data(struct rtw_adapter *padapter,
			    struct recv_frame *precv_frame);
void process23a_pwrbit_data(struct rtw_adapter *padapter,
			    struct recv_frame *precv_frame)
{
#ifdef CONFIG_8723AU_AP_MODE
	unsigned char pwrbit;
	struct sk_buff *skb = precv_frame->pkt;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct sta_info *psta = NULL;

	psta = rtw_get_stainfo23a(pstapriv, pattrib->src);

	if (psta) {
		pwrbit = ieee80211_has_pm(hdr->frame_control);

		if (pwrbit) {
			if (!(psta->state & WIFI_SLEEP_STATE))
				stop_sta_xmit23a(padapter, psta);
		} else {
			if (psta->state & WIFI_SLEEP_STATE)
				wakeup_sta_to_xmit23a(padapter, psta);
		}
	}
#endif
}

void
process_wmmps_data(struct rtw_adapter *padapter,
		   struct recv_frame *precv_frame);
/*
 * Handle a U-APSD trigger frame from a sleeping QoS station (AP mode only):
 * if the frame's AC is trigger-enabled, either deliver buffered frames or
 * answer with a QoS null frame carrying EOSP.
 */
void process_wmmps_data(struct rtw_adapter *padapter,
			struct recv_frame *precv_frame)
{
#ifdef CONFIG_8723AU_AP_MODE
	struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct sta_info *psta = NULL;

	psta = rtw_get_stainfo23a(pstapriv, pattrib->src);

	if (!psta)
		return;

	if (!psta->qos_option)
		return;

	if (!(psta->qos_info & 0xf))
		return;

	if (psta->state & WIFI_SLEEP_STATE) {
		u8 wmmps_ac = 0;

		/* map TID to the trigger-enabled bit of its access category */
		switch (pattrib->priority) {
		case 1:
		case 2:
			wmmps_ac = psta->uapsd_bk & BIT(1);
			break;
		case 4:
		case 5:
			wmmps_ac = psta->uapsd_vi & BIT(1);
			break;
		case 6:
		case 7:
			wmmps_ac = psta->uapsd_vo & BIT(1);
			break;
		case 0:
		case 3:
		default:
			wmmps_ac = psta->uapsd_be & BIT(1);
			break;
		}

		if (wmmps_ac) {
			if (psta->sleepq_ac_len > 0) {
				/* process received triggered frame */
				xmit_delivery_enabled_frames23a(padapter, psta);
			} else {
				/* issue one qos null frame with More data bit = 0
				   and the EOSP bit set (= 1) */
				issue_qos_nulldata23a(padapter, psta->hwaddr,
						      (u16)pattrib->priority,
						      0, 0);
			}
		}
	}
#endif
}

/*
 * Accumulate per-adapter and per-station RX byte/packet counters for this
 * frame. "sta" overrides the frame's own psta pointer when non-NULL.
 */
static void count_rx_stats(struct rtw_adapter *padapter,
			   struct recv_frame *prframe, struct sta_info *sta)
{
	int sz;
	struct sta_info *psta = NULL;
	struct stainfo_stats *pstats = NULL;
	struct rx_pkt_attrib *pattrib = & prframe->attrib;
	struct recv_priv *precvpriv = &padapter->recvpriv;

	sz = prframe->pkt->len;
	precvpriv->rx_bytes += sz;

	padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;

	if ((!is_broadcast_ether_addr(pattrib->dst)) &&
	    (!is_multicast_ether_addr(pattrib->dst)))
		padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;

	if (sta)
		psta = sta;
	else
		psta = prframe->psta;

	if (psta) {
		pstats = &psta->sta_stats;

		pstats->rx_data_pkts++;
		pstats->rx_bytes += sz;
	}
}

/*
 * Validate a data frame exchanged directly between stations (IBSS, STA,
 * AP and MP modes) and resolve the corresponding sta_info in *psta.
 */
static int sta2sta_data_frame(struct rtw_adapter *adapter,
			      struct recv_frame *precv_frame,
			      struct sta_info**psta)
{
	struct sk_buff *skb = precv_frame->pkt;
	struct ieee80211_hdr *hdr = (struct
ieee80211_hdr *) skb->data;
	int ret = _SUCCESS;
	struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	u8 *mybssid = get_bssid(pmlmepriv);
	u8 *myhwaddr = myid(&adapter->eeprompriv);
	u8 *sta_addr = NULL;
	int bmcast = is_multicast_ether_addr(pattrib->dst);

	if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
	    check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
		/* filter packets that SA is myself or multicast or broadcast */
		if (ether_addr_equal(myhwaddr, pattrib->src)) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "SA == myself\n");
			ret = _FAIL;
			goto exit;
		}

		/* unicast frames must be addressed to me */
		if (!ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
			ret = _FAIL;
			goto exit;
		}

		/* reject frames with a zero or foreign BSSID */
		if (ether_addr_equal(pattrib->bssid, "\x0\x0\x0\x0\x0\x0") ||
		    ether_addr_equal(mybssid, "\x0\x0\x0\x0\x0\x0") ||
		    !ether_addr_equal(pattrib->bssid, mybssid)) {
			ret = _FAIL;
			goto exit;
		}

		sta_addr = pattrib->src;
	} else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
		/* For Station mode, sa and bssid should always be BSSID,
		   and DA is my mac-address */
		if (!ether_addr_equal(pattrib->bssid, pattrib->src)) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "bssid != TA under STATION_MODE; drop pkt\n");
			ret = _FAIL;
			goto exit;
		}

		sta_addr = pattrib->bssid;
	} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		if (bmcast) {
			/* For AP mode, if DA == MCAST, then BSSID should
			   be also MCAST */
			if (!is_multicast_ether_addr(pattrib->bssid)) {
				ret = _FAIL;
				goto exit;
			}
		} else { /* not mc-frame */
			/* For AP mode, if DA is non-MCAST, then it must be
			   BSSID, and bssid == BSSID */
			if (!ether_addr_equal(pattrib->bssid, pattrib->dst)) {
				ret = _FAIL;
				goto exit;
			}

			sta_addr = pattrib->src;
		}
	} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
		/* MP (manufacturing test) mode: take addresses straight
		   from the 802.11 header */
		ether_addr_copy(pattrib->dst, hdr->addr1);
		ether_addr_copy(pattrib->src, hdr->addr2);
		ether_addr_copy(pattrib->bssid, hdr->addr3);
		ether_addr_copy(pattrib->ra, pattrib->dst);
		ether_addr_copy(pattrib->ta, pattrib->src);
		sta_addr = mybssid;
	} else {
		ret = _FAIL;
	}

	if (bmcast)
		*psta = rtw_get_bcmc_stainfo23a(adapter);
	else
		*psta = rtw_get_stainfo23a(pstapriv, sta_addr); /* get ap_info */

	if (*psta == NULL) {
		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
			 "can't get psta under sta2sta_data_frame ; drop pkt\n");
		ret = _FAIL;
		goto exit;
	}

exit:
	return ret;
}

/*
 * Validate a FromDS data frame (AP -> this station) and resolve the
 * transmitting AP's sta_info in *psta. Returns _SUCCESS, _FAIL, or
 * RTW_RX_HANDLED when the frame was consumed here (e.g. null-func).
 */
int ap2sta_data_frame(struct rtw_adapter *adapter,
		      struct recv_frame *precv_frame,
		      struct sta_info **psta);
int ap2sta_data_frame(struct rtw_adapter *adapter,
		      struct recv_frame *precv_frame,
		      struct sta_info **psta)
{
	struct sk_buff *skb = precv_frame->pkt;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
	int ret = _SUCCESS;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	u8 *mybssid = get_bssid(pmlmepriv);
	u8 *myhwaddr = myid(&adapter->eeprompriv);
	int bmcast = is_multicast_ether_addr(pattrib->dst);

	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
	    (check_fwstate(pmlmepriv, _FW_LINKED) ||
	     check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {

		/* filter packets that SA is myself or multicast or broadcast */
		if (ether_addr_equal(myhwaddr, pattrib->src)) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "SA == myself\n");
			ret = _FAIL;
			goto exit;
		}

		/* da should be for me */
		if (!ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
				 "ap2sta_data_frame: compare DA failed; DA=%pM\n",
				 pattrib->dst);
			ret = _FAIL;
			goto exit;
		}

		/* check BSSID */
		if (ether_addr_equal(pattrib->bssid, "\x0\x0\x0\x0\x0\x0") ||
		    ether_addr_equal(mybssid, "\x0\x0\x0\x0\x0\x0") ||
		    !ether_addr_equal(pattrib->bssid, mybssid)) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
				 "ap2sta_data_frame: compare BSSID failed; BSSID=%pM\n",
				 pattrib->bssid);
			RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
				 "mybssid=%pM\n", mybssid);

			if (!bmcast) {
				DBG_8723A("issue_deauth23a to the nonassociated ap=%pM for the reason(7)\n",
pattrib->bssid);
				issue_deauth23a(adapter, pattrib->bssid,
						WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
			}

			ret = _FAIL;
			goto exit;
		}

		if (bmcast)
			*psta = rtw_get_bcmc_stainfo23a(adapter);
		else
			/* get ap_info */
			*psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);

		if (*psta == NULL) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "ap2sta: can't get psta under STATION_MODE; drop pkt\n");
			ret = _FAIL;
			goto exit;
		}

		if (ieee80211_is_nullfunc(hdr->frame_control)) {
			/* No data, will not indicate to upper layer,
			   temporily count it here */
			count_rx_stats(adapter, precv_frame, *psta);
			ret = RTW_RX_HANDLED;
			goto exit;
		}
	} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) &&
		   check_fwstate(pmlmepriv, _FW_LINKED)) {
		/* MP mode: addresses come straight from the 802.11 header */
		ether_addr_copy(pattrib->dst, hdr->addr1);
		ether_addr_copy(pattrib->src, hdr->addr2);
		ether_addr_copy(pattrib->bssid, hdr->addr3);
		ether_addr_copy(pattrib->ra, pattrib->dst);
		ether_addr_copy(pattrib->ta, pattrib->src);

		/* */
		ether_addr_copy(pattrib->bssid, mybssid);

		/* get sta_info */
		*psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
		if (*psta == NULL) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "can't get psta under MP_MODE ; drop pkt\n");
			ret = _FAIL;
			goto exit;
		}
	} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		/* Special case */
		ret = RTW_RX_HANDLED;
		goto exit;
	} else {
		/* not associated: deauth the unknown sender if it targeted us */
		if (ether_addr_equal(myhwaddr, pattrib->dst) && !bmcast) {
			*psta = rtw_get_stainfo23a(pstapriv, pattrib->bssid);
			if (*psta == NULL) {
				DBG_8723A("issue_deauth23a to the ap=%pM for the reason(7)\n",
					  pattrib->bssid);
				issue_deauth23a(adapter, pattrib->bssid,
						WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
			}
		}

		ret = _FAIL;
	}

exit:
	return ret;
}

/*
 * Validate a ToDS data frame (station -> this AP), resolve the sender's
 * sta_info in *psta, and run AP-side power-save bookkeeping. Returns
 * _SUCCESS, _FAIL, or RTW_RX_HANDLED when the frame was consumed here.
 */
int sta2ap_data_frame(struct rtw_adapter *adapter,
		      struct recv_frame *precv_frame,
		      struct sta_info **psta);
int sta2ap_data_frame(struct rtw_adapter *adapter,
		      struct recv_frame *precv_frame,
		      struct sta_info **psta)
{
	struct sk_buff *skb = precv_frame->pkt;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	unsigned char *mybssid = get_bssid(pmlmepriv);
	int ret = _SUCCESS;

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		/* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
		if (!ether_addr_equal(pattrib->bssid, mybssid)) {
			ret = _FAIL;
			goto exit;
		}

		*psta = rtw_get_stainfo23a(pstapriv, pattrib->src);
		if (*psta == NULL) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "can't get psta under AP_MODE; drop pkt\n");
			DBG_8723A("issue_deauth23a to sta=%pM for the reason(7)\n",
				  pattrib->src);

			issue_deauth23a(adapter, pattrib->src,
					WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);

			ret = RTW_RX_HANDLED;
			goto exit;
		}

		process23a_pwrbit_data(adapter, precv_frame);

		/* We only get here if it's a data frame, so no need to
		 * confirm data frame type first */
		if (ieee80211_is_data_qos(hdr->frame_control))
			process_wmmps_data(adapter, precv_frame);

		if (ieee80211_is_nullfunc(hdr->frame_control)) {
			/* No data, will not indicate to upper layer,
			   temporily count it here */
			count_rx_stats(adapter, precv_frame, *psta);
			ret = RTW_RX_HANDLED;
			goto exit;
		}
	} else {
		u8 *myhwaddr = myid(&adapter->eeprompriv);

		if (!ether_addr_equal(pattrib->ra, myhwaddr)) {
			ret = RTW_RX_HANDLED;
			goto exit;
		}
		DBG_8723A("issue_deauth23a to sta=%pM for the reason(7)\n",
			  pattrib->src);
		issue_deauth23a(adapter, pattrib->src,
				WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
		ret = RTW_RX_HANDLED;
		goto exit;
	}

exit:
	return ret;
}

/*
 * Handle received control frames addressed to us. Only PS-Poll is acted
 * on (AP mode); everything else falls through. Always returns _FAIL so
 * the frame is not indicated further up the stack.
 */
static int validate_recv_ctrl_frame(struct rtw_adapter *padapter,
				    struct recv_frame *precv_frame)
{
#ifdef CONFIG_8723AU_AP_MODE
	struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct sk_buff *skb = precv_frame->pkt;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_ctl(hdr->frame_control))
		return _FAIL;

	/* receive the frames that ra(a1) is my address */
	if (!ether_addr_equal(hdr->addr1,
myid(&padapter->eeprompriv)))
		return _FAIL;

	/* only handle ps-poll */
	if (ieee80211_is_pspoll(hdr->frame_control)) {
		struct ieee80211_pspoll *psp = (struct ieee80211_pspoll *)hdr;
		u16 aid;
		u8 wmmps_ac = 0;
		struct sta_info *psta = NULL;

		aid = le16_to_cpu(psp->aid) & 0x3fff;
		psta = rtw_get_stainfo23a(pstapriv, hdr->addr2);

		/* PS-Poll must come from a known STA carrying its own AID */
		if (!psta || psta->aid != aid)
			return _FAIL;

		/* for rx pkt statistics */
		psta->sta_stats.rx_ctrl_pkts++;

		/* a trigger-enabled AC is serviced by U-APSD, not PS-Poll */
		switch (pattrib->priority) {
		case 1:
		case 2:
			wmmps_ac = psta->uapsd_bk & BIT(0);
			break;
		case 4:
		case 5:
			wmmps_ac = psta->uapsd_vi & BIT(0);
			break;
		case 6:
		case 7:
			wmmps_ac = psta->uapsd_vo & BIT(0);
			break;
		case 0:
		case 3:
		default:
			wmmps_ac = psta->uapsd_be & BIT(0);
			break;
		}

		if (wmmps_ac)
			return _FAIL;

		if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
			DBG_8723A("%s alive check-rx ps-poll\n", __func__);
			psta->expire_to = pstapriv->expire_to;
			psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
		}

		if ((psta->state & WIFI_SLEEP_STATE) &&
		    (pstapriv->sta_dz_bitmap & CHKBIT(psta->aid))) {
			struct list_head *xmitframe_plist, *xmitframe_phead;
			struct xmit_frame *pxmitframe;
			struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

			spin_lock_bh(&pxmitpriv->lock);

			xmitframe_phead = get_list_head(&psta->sleep_q);
			xmitframe_plist = xmitframe_phead->next;

			if (!list_empty(xmitframe_phead)) {
				/* deliver exactly one buffered frame per poll */
				pxmitframe = container_of(xmitframe_plist,
							  struct xmit_frame,
							  list);

				xmitframe_plist = xmitframe_plist->next;

				list_del_init(&pxmitframe->list);

				psta->sleepq_len--;

				/* More Data bit tells the STA whether to poll again */
				if (psta->sleepq_len>0)
					pxmitframe->attrib.mdata = 1;
				else
					pxmitframe->attrib.mdata = 0;

				pxmitframe->attrib.triggered = 1;

				/* DBG_8723A("handling ps-poll, q_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */

				rtl8723au_hal_xmitframe_enqueue(padapter,
								pxmitframe);

				if (psta->sleepq_len == 0) {
					pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);

					/* DBG_8723A("after handling ps-poll, tim =%x\n", pstapriv->tim_bitmap); */

					/* update BCN for TIM IE */
					/* update_BCNTIM(padapter); */
					update_beacon23a(padapter, WLAN_EID_TIM,
							 NULL, false);
				}

				/* spin_unlock_bh(&psta->sleep_q.lock); */
				spin_unlock_bh(&pxmitpriv->lock);

			} else {
				/* spin_unlock_bh(&psta->sleep_q.lock); */
				spin_unlock_bh(&pxmitpriv->lock);

				/* DBG_8723A("no buffered packets to xmit\n"); */
				if (pstapriv->tim_bitmap & CHKBIT(psta->aid)) {
					if (psta->sleepq_len == 0) {
						DBG_8723A("no buffered packets "
							  "to xmit\n");

						/* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
						issue_nulldata23a(padapter,
								  psta->hwaddr,
								  0, 0, 0);
					} else {
						DBG_8723A("error!psta->sleepq"
							  "_len =%d\n",
							  psta->sleepq_len);
						psta->sleepq_len = 0;
					}

					pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);

					/* update BCN for TIM IE */
					/* update_BCNTIM(padapter); */
					update_beacon23a(padapter, WLAN_EID_TIM,
							 NULL, false);
				}
			}
		}
	}
#endif
	return _FAIL;
}

struct recv_frame *recvframe_chk_defrag23a(struct rtw_adapter *padapter,
					   struct recv_frame *precv_frame);

/*
 * Defragment (if needed), count per-STA management statistics, and hand
 * the frame to the management dispatcher. Always returns _SUCCESS.
 */
static int validate_recv_mgnt_frame(struct rtw_adapter *padapter,
				    struct recv_frame *precv_frame)
{
	struct sta_info *psta;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	/* struct mlme_priv *pmlmepriv = &adapter->mlmepriv; */

	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
		 "+validate_recv_mgnt_frame\n");

	precv_frame = recvframe_chk_defrag23a(padapter, precv_frame);
	if (precv_frame == NULL) {
		/* frame was queued for reassembly, nothing more to do yet */
		RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
			 "%s: fragment packet\n", __func__);
		return _SUCCESS;
	}

	skb = precv_frame->pkt;
	hdr = (struct ieee80211_hdr *) skb->data;

	/* for rx pkt statistics */
	psta = rtw_get_stainfo23a(&padapter->stapriv, hdr->addr2);
	if (psta) {
		psta->sta_stats.rx_mgnt_pkts++;

		if (ieee80211_is_beacon(hdr->frame_control))
			psta->sta_stats.rx_beacon_pkts++;
		else if (ieee80211_is_probe_req(hdr->frame_control))
			psta->sta_stats.rx_probereq_pkts++;
		else if (ieee80211_is_probe_resp(hdr->frame_control)) {
			if (ether_addr_equal(padapter->eeprompriv.mac_addr,
					     hdr->addr1))
				psta->sta_stats.rx_probersp_pkts++;
			else if (is_broadcast_ether_addr(hdr->addr1) ||
				 is_multicast_ether_addr(hdr->addr1))
psta->sta_stats.rx_probersp_bm_pkts++;
			else
				psta->sta_stats.rx_probersp_uo_pkts++;
		}
	}

	mgt_dispatcher23a(padapter, precv_frame);

	return _SUCCESS;
}

/*
 * Validate a received data frame: classify by ToDS/FromDS, resolve the
 * sending station, de-duplicate, and record the QoS and encryption
 * attributes (iv/icv lengths) needed by later processing stages.
 */
static int validate_recv_data_frame(struct rtw_adapter *adapter,
				    struct recv_frame *precv_frame)
{
	u8 bretry;
	u8 *psa, *pda;
	struct sta_info *psta = NULL;
	struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
	struct security_priv *psecuritypriv = &adapter->securitypriv;
	int ret = _SUCCESS;
	struct sk_buff *skb = precv_frame->pkt;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	bretry = ieee80211_has_retry(hdr->frame_control);
	pda = ieee80211_get_DA(hdr);
	psa = ieee80211_get_SA(hdr);

	ether_addr_copy(pattrib->dst, pda);
	ether_addr_copy(pattrib->src, psa);

	/* dispatch on the four ToDS/FromDS combinations */
	switch (hdr->frame_control &
		cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
	case cpu_to_le16(0):
		ether_addr_copy(pattrib->bssid, hdr->addr3);
		ether_addr_copy(pattrib->ra, pda);
		ether_addr_copy(pattrib->ta, psa);
		ret = sta2sta_data_frame(adapter, precv_frame, &psta);
		break;
	case cpu_to_le16(IEEE80211_FCTL_FROMDS):
		ether_addr_copy(pattrib->bssid, hdr->addr2);
		ether_addr_copy(pattrib->ra, pda);
		ether_addr_copy(pattrib->ta, hdr->addr2);
		ret = ap2sta_data_frame(adapter, precv_frame, &psta);
		break;
	case cpu_to_le16(IEEE80211_FCTL_TODS):
		ether_addr_copy(pattrib->bssid, hdr->addr1);
		ether_addr_copy(pattrib->ra, hdr->addr1);
		ether_addr_copy(pattrib->ta, psa);
		ret = sta2ap_data_frame(adapter, precv_frame, &psta);
		break;
	case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
		/*
		 * There is no BSSID in this case, but the driver has been
		 * using addr1 so far, so keep it for now.
		 */
		ether_addr_copy(pattrib->bssid, hdr->addr1);
		ether_addr_copy(pattrib->ra, hdr->addr1);
		ether_addr_copy(pattrib->ta, hdr->addr2);
		ret = _FAIL;
		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "case 3\n");
		break;
	}

	if ((ret == _FAIL) || (ret == RTW_RX_HANDLED))
		goto exit;

	if (!psta) {
		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
			 "after to_fr_ds_chk; psta == NULL\n");
		ret = _FAIL;
		goto exit;
	}

	/* psta->rssi = prxcmd->rssi; */
	/* psta->signal_quality = prxcmd->sq; */
	precv_frame->psta = psta;

	pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
	if (ieee80211_has_a4(hdr->frame_control))
		pattrib->hdrlen += ETH_ALEN;

	/* parsing QC field */
	if (pattrib->qos == 1) {
		__le16 *qptr = (__le16 *)ieee80211_get_qos_ctl(hdr);
		u16 qos_ctrl = le16_to_cpu(*qptr);

		pattrib->priority = qos_ctrl & IEEE80211_QOS_CTL_TID_MASK;
		pattrib->ack_policy = (qos_ctrl >> 5) & 3;
		pattrib->amsdu =
			(qos_ctrl & IEEE80211_QOS_CTL_A_MSDU_PRESENT) >> 7;
		pattrib->hdrlen += IEEE80211_QOS_CTL_LEN;

		if (pattrib->priority != 0 && pattrib->priority != 3) {
			adapter->recvpriv.bIsAnyNonBEPkts = true;
		}
	} else {
		pattrib->priority = 0;
		pattrib->ack_policy = 0;
		pattrib->amsdu = 0;
	}

	if (pattrib->order) { /* HT-CTRL 11n */
		pattrib->hdrlen += 4;
	}

	precv_frame->preorder_ctrl =
		&psta->recvreorder_ctrl[pattrib->priority];

	/* decache, drop duplicate recv packets */
	if (recv_decache(precv_frame, bretry,
			 &psta->sta_recvpriv.rxcache) == _FAIL) {
		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
			 "decache : drop pkt\n");
		ret = _FAIL;
		goto exit;
	}

	if (pattrib->privacy) {
		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
			 "validate_recv_data_frame:pattrib->privacy =%x\n",
			 pattrib->privacy);
		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
			 "^^^^^^^^^^^is_multicast_ether_addr(pattrib->ra(0x%02x)) =%d^^^^^^^^^^^^^^^6\n",
			 pattrib->ra[0],
			 is_multicast_ether_addr(pattrib->ra));

		GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt,
			       is_multicast_ether_addr(pattrib->ra));

		RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
			 "pattrib->encrypt =%d\n",
pattrib->encrypt);

		/* record per-cipher IV/ICV lengths for later stripping */
		switch (pattrib->encrypt) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			pattrib->iv_len = IEEE80211_WEP_IV_LEN;
			pattrib->icv_len = IEEE80211_WEP_ICV_LEN;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			pattrib->iv_len = IEEE80211_TKIP_IV_LEN;
			pattrib->icv_len = IEEE80211_TKIP_ICV_LEN;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			pattrib->iv_len = IEEE80211_CCMP_HDR_LEN;
			pattrib->icv_len = IEEE80211_CCMP_MIC_LEN;
			break;
		default:
			pattrib->iv_len = 0;
			pattrib->icv_len = 0;
			break;
		}
	} else {
		pattrib->encrypt = 0;
		pattrib->iv_len = 0;
		pattrib->icv_len = 0;
	}

exit:
	return ret;
}

/*
 * Debug helper: hex-dump the first 64 bytes of a packet when the dump
 * level matches the frame type.
 * NOTE(review): always reads 64 bytes — presumably callers only enable
 * this on frames at least that long; confirm skb->len before relying on it.
 */
static void dump_rx_pkt(struct sk_buff *skb, u16 type, int level)
{
	int i;
	u8 *ptr;

	if ((level == 1) ||
	    ((level == 2) && (type == IEEE80211_FTYPE_MGMT)) ||
	    ((level == 3) && (type == IEEE80211_FTYPE_DATA))) {
		ptr = skb->data;

		DBG_8723A("#############################\n");

		for (i = 0; i < 64; i = i + 8)
			DBG_8723A("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n",
				  *(ptr + i), *(ptr + i + 1), *(ptr + i + 2),
				  *(ptr + i + 3), *(ptr + i + 4),
				  *(ptr + i + 5), *(ptr + i + 6),
				  *(ptr + i + 7));
		DBG_8723A("#############################\n");
	}
}

/*
 * Top-level RX validation: parse the frame control word and dispatch to
 * the management/control/data validators. Only data frames may return
 * _SUCCESS; everything else is consumed or dropped.
 */
static int validate_recv_frame(struct rtw_adapter *adapter,
			       struct recv_frame *precv_frame)
{
	/* shall check frame subtype, to / from ds, da, bssid */
	/* then call check if rx seq/frag. duplicated. */
	u8 type;
	u8 subtype;
	int retval = _SUCCESS;
	struct rx_pkt_attrib *pattrib = & precv_frame->attrib;
	struct sk_buff *skb = precv_frame->pkt;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 ver;
	u8 bDumpRxPkt;
	u16 seq_ctrl, fctl;

	fctl = le16_to_cpu(hdr->frame_control);
	ver = fctl & IEEE80211_FCTL_VERS;
	type = fctl & IEEE80211_FCTL_FTYPE;
	subtype = fctl & IEEE80211_FCTL_STYPE;

	/* add version chk */
	if (ver != 0) {
		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
			 "validate_recv_data_frame fail! (ver!= 0)\n");
		retval = _FAIL;
		goto exit;
	}

	seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
	pattrib->frag_num = seq_ctrl & IEEE80211_SCTL_FRAG;
	pattrib->seq_num = seq_ctrl >> 4;

	pattrib->pw_save = ieee80211_has_pm(hdr->frame_control);
	pattrib->mfrag = ieee80211_has_morefrags(hdr->frame_control);
	pattrib->mdata = ieee80211_has_moredata(hdr->frame_control);
	pattrib->privacy = ieee80211_has_protected(hdr->frame_control);
	pattrib->order = ieee80211_has_order(hdr->frame_control);

	GetHalDefVar8192CUsb(adapter, HAL_DEF_DBG_DUMP_RXPKT, &bDumpRxPkt);

	if (unlikely(bDumpRxPkt == 1))
		dump_rx_pkt(skb, type, bDumpRxPkt);

	switch (type) {
	case IEEE80211_FTYPE_MGMT:
		retval = validate_recv_mgnt_frame(adapter, precv_frame);
		if (retval == _FAIL) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "validate_recv_mgnt_frame fail\n");
		}
		retval = _FAIL; /* only data frame return _SUCCESS */
		break;
	case IEEE80211_FTYPE_CTL:
		retval = validate_recv_ctrl_frame(adapter, precv_frame);
		if (retval == _FAIL) {
			RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
				 "validate_recv_ctrl_frame fail\n");
		}
		retval = _FAIL; /* only data frame return _SUCCESS */
		break;
	case IEEE80211_FTYPE_DATA:
		pattrib->qos = (subtype & IEEE80211_STYPE_QOS_DATA) ? 1 : 0;
		retval = validate_recv_data_frame(adapter, precv_frame);
		if (retval == _FAIL) {
			struct recv_priv *precvpriv = &adapter->recvpriv;

			precvpriv->rx_drop++;
		}
		break;
	default:
		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
			 "validate_recv_data_frame fail! type = 0x%x\n", type);
		retval = _FAIL;
		break;
	}

exit:
	return retval;
}

/* remove the wlanhdr and add the eth_hdr */
static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
{
	u16 eth_type, len, hdrlen;
	u8 bsnaphdr;
	u8 *psnap;
	struct rtw_adapter *adapter = precvframe->adapter;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	struct sk_buff *skb = precvframe->pkt;
	u8 *ptr;
	struct rx_pkt_attrib *pattrib = &precvframe->attrib;

	ptr = skb->data;
	hdrlen = pattrib->hdrlen;
	psnap = ptr + hdrlen;
	eth_type = (psnap[6] << 8) | psnap[7];
	/* convert hdr + possible LLC headers into Ethernet header */
	/* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */
	if ((ether_addr_equal(psnap, rfc1042_header) &&
	     eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
	    ether_addr_equal(psnap, bridge_tunnel_header)) {
		/* remove RFC1042 or Bridge-Tunnel encapsulation and
		   replace EtherType */
		bsnaphdr = true;
		hdrlen += SNAP_SIZE;
	} else {
		/* Leave Ethernet header part of hdr and full payload */
		bsnaphdr = false;
		eth_type = (psnap[0] << 8) | psnap[1];
	}

	len = skb->len - hdrlen;

	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
		 "=== pattrib->hdrlen: %x, pattrib->iv_len:%x ===\n",
		 pattrib->hdrlen, pattrib->iv_len);

	pattrib->eth_type = eth_type;
	if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
		ptr += hdrlen;
		*ptr = 0x87;
		*(ptr + 1) = 0x12;

		eth_type = 0x8712;
		/* append rx status for mp test packets */
		ptr = skb_pull(skb, (hdrlen - sizeof(struct ethhdr) + 2) - 24);
		memcpy(ptr, skb->head, 24);
		ptr += 24;
	} else {
		ptr = skb_pull(skb, (hdrlen - sizeof(struct ethhdr) +
				     (bsnaphdr ? 2:0)));
	}

	ether_addr_copy(ptr, pattrib->dst);
	ether_addr_copy(ptr + ETH_ALEN, pattrib->src);

	if (!bsnaphdr) {
		put_unaligned_be16(len, ptr + 12);
	}

	return _SUCCESS;
}

/*
 * perform defrag
 *
 * Reassemble the queued fragments in defrag_q into the first fragment's
 * skb. Fragment numbers must be consecutive starting at 0; otherwise the
 * whole queue is freed and NULL is returned.
 */
struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
				    struct rtw_queue *defrag_q);
struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
				    struct rtw_queue *defrag_q)
{
	struct list_head *plist, *phead, *ptmp;
	u8 *data, wlanhdr_offset;
	u8 curfragnum;
	struct recv_frame *pnfhdr;
	struct recv_frame *prframe, *pnextrframe;
	struct rtw_queue *pfree_recv_queue;
	struct sk_buff *skb;

	curfragnum = 0;
	pfree_recv_queue = &adapter->recvpriv.free_recv_queue;

	phead = get_list_head(defrag_q);
	plist = phead->next;
	prframe = container_of(plist, struct recv_frame, list);
	list_del_init(&prframe->list);
	skb = prframe->pkt;

	if (curfragnum != prframe->attrib.frag_num) {
		/* the first fragment number must be 0 */
		/* free the whole queue */
		rtw_free_recvframe23a(prframe);
		rtw_free_recvframe23a_queue(defrag_q);

		return NULL;
	}

	curfragnum++;

	phead = get_list_head(defrag_q);

	data = prframe->pkt->data;

	list_for_each_safe(plist, ptmp, phead) {
		pnfhdr = container_of(plist, struct recv_frame, list);
		pnextrframe = (struct recv_frame *)pnfhdr;
		/* check the fragment sequence (2nd ~n fragment frame) */

		if (curfragnum != pnfhdr->attrib.frag_num) {
			/* the fragment number must be increasing
			   (after decache) */
			/* release the defrag_q & prframe */
			rtw_free_recvframe23a(prframe);
			rtw_free_recvframe23a_queue(defrag_q);
			return NULL;
		}

		curfragnum++;

		/* copy the 2nd~n fragment frame's payload to the
		   first fragment */
		/* get the 2nd~last fragment frame's payload */

		wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;

		skb_pull(pnfhdr->pkt, wlanhdr_offset);

		/* append to first fragment frame's tail
		   (if privacy frame, pull the ICV) */

		skb_trim(skb, skb->len - prframe->attrib.icv_len);

		memcpy(skb_tail_pointer(skb), pnfhdr->pkt->data,
		       pnfhdr->pkt->len);

		skb_put(skb, pnfhdr->pkt->len);
prframe->attrib.icv_len = pnfhdr->attrib.icv_len; } /* free the defrag_q queue and return the prframe */ rtw_free_recvframe23a_queue(defrag_q); RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "Performance defrag!!!!!\n"); return prframe; } /* check if need to defrag, if needed queue the frame to defrag_q */ struct recv_frame *recvframe_chk_defrag23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame) { u8 ismfrag; u8 fragnum; u8 *psta_addr; struct recv_frame *pfhdr; struct sta_info *psta; struct sta_priv *pstapriv; struct list_head *phead; struct recv_frame *prtnframe = NULL; struct rtw_queue *pfree_recv_queue, *pdefrag_q; pstapriv = &padapter->stapriv; pfhdr = precv_frame; pfree_recv_queue = &padapter->recvpriv.free_recv_queue; /* need to define struct of wlan header frame ctrl */ ismfrag = pfhdr->attrib.mfrag; fragnum = pfhdr->attrib.frag_num; psta_addr = pfhdr->attrib.ta; psta = rtw_get_stainfo23a(pstapriv, psta_addr); if (!psta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) pfhdr->pkt->data; if (!ieee80211_is_data(hdr->frame_control)) { psta = rtw_get_bcmc_stainfo23a(padapter); pdefrag_q = &psta->sta_recvpriv.defrag_q; } else pdefrag_q = NULL; } else pdefrag_q = &psta->sta_recvpriv.defrag_q; if ((ismfrag == 0) && (fragnum == 0)) { prtnframe = precv_frame;/* isn't a fragment frame */ } if (ismfrag == 1) { /* 0~(n-1) fragment frame */ /* enqueue to defraf_g */ if (pdefrag_q != NULL) { if (fragnum == 0) { /* the first fragment */ if (!list_empty(&pdefrag_q->queue)) { /* free current defrag_q */ rtw_free_recvframe23a_queue(pdefrag_q); } } /* Then enqueue the 0~(n-1) fragment into the defrag_q */ /* spin_lock(&pdefrag_q->lock); */ phead = get_list_head(pdefrag_q); list_add_tail(&pfhdr->list, phead); /* spin_unlock(&pdefrag_q->lock); */ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "Enqueuq: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum); prtnframe = NULL; } else { /* can't find this ta's defrag_queue, so free this recv_frame */ 
rtw_free_recvframe23a(precv_frame); prtnframe = NULL; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "Free because pdefrag_q == NULL: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum); } } if ((ismfrag == 0) && (fragnum != 0)) { /* the last fragment frame */ /* enqueue the last fragment */ if (pdefrag_q != NULL) { /* spin_lock(&pdefrag_q->lock); */ phead = get_list_head(pdefrag_q); list_add_tail(&pfhdr->list, phead); /* spin_unlock(&pdefrag_q->lock); */ /* call recvframe_defrag to defrag */ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "defrag: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum); precv_frame = recvframe_defrag(padapter, pdefrag_q); prtnframe = precv_frame; } else { /* can't find this ta's defrag_queue, so free this recv_frame */ rtw_free_recvframe23a(precv_frame); prtnframe = NULL; RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "Free because pdefrag_q == NULL: ismfrag = %d, fragnum = %d\n", ismfrag, fragnum); } } if ((prtnframe != NULL) && (prtnframe->attrib.privacy)) { /* after defrag we must check tkip mic code */ if (recvframe_chkmic(padapter, prtnframe) == _FAIL) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "recvframe_chkmic(padapter, prtnframe) ==_FAIL\n"); rtw_free_recvframe23a(prtnframe); prtnframe = NULL; } } return prtnframe; } int amsdu_to_msdu(struct rtw_adapter *padapter, struct recv_frame *prframe); int amsdu_to_msdu(struct rtw_adapter *padapter, struct recv_frame *prframe) { struct rx_pkt_attrib *pattrib; struct sk_buff *skb, *sub_skb; struct sk_buff_head skb_list; pattrib = &prframe->attrib; skb = prframe->pkt; skb_pull(skb, prframe->attrib.hdrlen); __skb_queue_head_init(&skb_list); ieee80211_amsdu_to_8023s(skb, &skb_list, NULL, 0, 0, false); while (!skb_queue_empty(&skb_list)) { sub_skb = __skb_dequeue(&skb_list); sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev); sub_skb->dev = padapter->pnetdev; sub_skb->ip_summed = CHECKSUM_NONE; netif_rx(sub_skb); } prframe->pkt = NULL; rtw_free_recvframe23a(prframe); return _SUCCESS; } 
int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num); int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num) { u8 wsize = preorder_ctrl->wsize_b; u16 wend = (preorder_ctrl->indicate_seq + wsize -1) & 0xFFF; /* Rx Reorder initialize condition. */ if (preorder_ctrl->indicate_seq == 0xFFFF) preorder_ctrl->indicate_seq = seq_num; /* Drop out the packet which SeqNum is smaller than WinStart */ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq)) return false; /* */ /* Sliding window manipulation. Conditions includes: */ /* 1. Incoming SeqNum is equal to WinStart =>Window shift 1 */ /* 2. Incoming SeqNum is larger than the WinEnd => Window shift N */ /* */ if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) { preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF; } else if (SN_LESS(wend, seq_num)) { /* boundary situation, when seq_num cross 0xFFF */ if (seq_num >= (wsize - 1)) preorder_ctrl->indicate_seq = seq_num + 1 -wsize; else preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1; } return true; } static int enqueue_reorder_recvframe23a(struct recv_reorder_ctrl *preorder_ctrl, struct recv_frame *prframe) { struct rx_pkt_attrib *pattrib = &prframe->attrib; struct rtw_queue *ppending_recvframe_queue; struct list_head *phead, *plist, *ptmp; struct recv_frame *hdr; struct rx_pkt_attrib *pnextattrib; ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; /* DbgPrint("+enqueue_reorder_recvframe23a()\n"); */ /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */ /* spin_lock_ex(&ppending_recvframe_queue->lock); */ phead = get_list_head(ppending_recvframe_queue); list_for_each_safe(plist, ptmp, phead) { hdr = container_of(plist, struct recv_frame, list); pnextattrib = &hdr->attrib; if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num)) { continue; } else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) { /* Duplicate entry is found!! Do not insert current entry. 
*/ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ return false; } else { break; } /* DbgPrint("enqueue_reorder_recvframe23a():while\n"); */ } /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */ /* spin_lock_ex(&ppending_recvframe_queue->lock); */ list_del_init(&prframe->list); list_add_tail(&prframe->list, plist); /* spin_unlock_ex(&ppending_recvframe_queue->lock); */ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ return true; } int recv_indicatepkts_in_order(struct rtw_adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced); int recv_indicatepkts_in_order(struct rtw_adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced) { /* u8 bcancelled; */ struct list_head *phead, *plist; struct recv_frame *prframe; struct rx_pkt_attrib *pattrib; /* u8 index = 0; */ int bPktInBuf = false; struct recv_priv *precvpriv; struct rtw_queue *ppending_recvframe_queue; precvpriv = &padapter->recvpriv; ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; /* DbgPrint("+recv_indicatepkts_in_order\n"); */ /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */ /* spin_lock_ex(&ppending_recvframe_queue->lock); */ phead = get_list_head(ppending_recvframe_queue); plist = phead->next; /* Handling some condition for forced indicate case. */ if (bforced) { if (list_empty(phead)) { /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ /* spin_unlock_ex(&ppending_recvframe_queue->lock); */ return true; } prframe = container_of(plist, struct recv_frame, list); pattrib = &prframe->attrib; preorder_ctrl->indicate_seq = pattrib->seq_num; } /* Prepare indication list and indication. */ /* Check if there is any packet need indicate. 
*/ while (!list_empty(phead)) { prframe = container_of(plist, struct recv_frame, list); pattrib = &prframe->attrib; if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) { RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, "recv_indicatepkts_in_order: indicate =%d seq =%d amsdu =%d\n", preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu); plist = plist->next; list_del_init(&prframe->list); if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num)) { preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)&0xFFF; } if (!pattrib->amsdu) { if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) { rtw_recv_indicatepkt23a(padapter, prframe); } } else { if (amsdu_to_msdu(padapter, prframe) != _SUCCESS) rtw_free_recvframe23a(prframe); } /* Update local variables. */ bPktInBuf = false; } else { bPktInBuf = true; break; } /* DbgPrint("recv_indicatepkts_in_order():while\n"); */ } /* spin_unlock_ex(&ppending_recvframe_queue->lock); */ /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */ return bPktInBuf; } int recv_indicatepkt_reorder(struct rtw_adapter *padapter, struct recv_frame *prframe); int recv_indicatepkt_reorder(struct rtw_adapter *padapter, struct recv_frame *prframe) { int retval = _SUCCESS; struct rx_pkt_attrib *pattrib; struct recv_reorder_ctrl *preorder_ctrl; struct rtw_queue *ppending_recvframe_queue; pattrib = &prframe->attrib; preorder_ctrl = prframe->preorder_ctrl; ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; if (!pattrib->amsdu) { /* s1. 
*/ wlanhdr_to_ethhdr(prframe); if ((pattrib->qos!= 1) || (pattrib->eth_type == ETH_P_ARP) || (pattrib->ack_policy != 0)) { if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) { RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, "@@@@ recv_indicatepkt_reorder -recv_func recv_indicatepkt\n"); rtw_recv_indicatepkt23a(padapter, prframe); return _SUCCESS; } return _FAIL; } if (preorder_ctrl->enable == false) { /* indicate this recv_frame */ preorder_ctrl->indicate_seq = pattrib->seq_num; rtw_recv_indicatepkt23a(padapter, prframe); preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096; return _SUCCESS; } } else { /* temp filter -> means didn't support A-MSDUs in a A-MPDU */ if (preorder_ctrl->enable == false) { preorder_ctrl->indicate_seq = pattrib->seq_num; retval = amsdu_to_msdu(padapter, prframe); preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096; return retval; } } spin_lock_bh(&ppending_recvframe_queue->lock); RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, "recv_indicatepkt_reorder: indicate =%d seq =%d\n", preorder_ctrl->indicate_seq, pattrib->seq_num); /* s2. check if winstart_b(indicate_seq) needs to been updated */ if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) { goto _err_exit; } /* s3. Insert all packet into Reorder Queue to maintain its ordering. */ if (!enqueue_reorder_recvframe23a(preorder_ctrl, prframe)) { goto _err_exit; } /* s4. */ /* Indication process. */ /* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets */ /* with the SeqNum smaller than latest WinStart and buffer other packets. */ /* */ /* For Rx Reorder condition: */ /* 1. All packets with SeqNum smaller than WinStart => Indicate */ /* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. 
*/ /* */ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false) == true) { mod_timer(&preorder_ctrl->reordering_ctrl_timer, jiffies + msecs_to_jiffies(REORDER_WAIT_TIME)); spin_unlock_bh(&ppending_recvframe_queue->lock); } else { spin_unlock_bh(&ppending_recvframe_queue->lock); del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); } return _SUCCESS; _err_exit: spin_unlock_bh(&ppending_recvframe_queue->lock); return _FAIL; } void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext) { struct recv_reorder_ctrl *preorder_ctrl; struct rtw_adapter *padapter; struct rtw_queue *ppending_recvframe_queue; preorder_ctrl = (struct recv_reorder_ctrl *)pcontext; padapter = preorder_ctrl->padapter; ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { return; } /* DBG_8723A("+rtw_reordering_ctrl_timeout_handler23a() =>\n"); */ spin_lock_bh(&ppending_recvframe_queue->lock); if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true) { mod_timer(&preorder_ctrl->reordering_ctrl_timer, jiffies + msecs_to_jiffies(REORDER_WAIT_TIME)); } spin_unlock_bh(&ppending_recvframe_queue->lock); } int process_recv_indicatepkts(struct rtw_adapter *padapter, struct recv_frame *prframe); int process_recv_indicatepkts(struct rtw_adapter *padapter, struct recv_frame *prframe) { int retval = _SUCCESS; /* struct recv_priv *precvpriv = &padapter->recvpriv; */ /* struct rx_pkt_attrib *pattrib = &prframe->attrib; */ struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; if (phtpriv->ht_option == true) { /* B/G/N Mode */ /* prframe->preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */ /* including perform A-MPDU Rx Ordering Buffer Control */ if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) { if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) { retval = _FAIL; return retval; } } } else { /* B/G mode */ 
retval = wlanhdr_to_ethhdr(prframe); if (retval != _SUCCESS) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "wlanhdr_to_ethhdr: drop pkt\n"); return retval; } if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) { /* indicate this recv_frame */ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, "@@@@ process_recv_indicatepkts- recv_func recv_indicatepkt\n"); rtw_recv_indicatepkt23a(padapter, prframe); } else { RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, "@@@@ process_recv_indicatepkts- recv_func free_indicatepkt\n"); RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, "recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", padapter->bDriverStopped, padapter->bSurpriseRemoved); retval = _FAIL; return retval; } } return retval; } static int recv_func_prehandle(struct rtw_adapter *padapter, struct recv_frame *rframe) { int ret = _SUCCESS; /* check the frame crtl field and decache */ ret = validate_recv_frame(padapter, rframe); if (ret != _SUCCESS) { RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, "recv_func: validate_recv_frame fail! 
drop pkt\n"); rtw_free_recvframe23a(rframe); goto exit; } exit: return ret; } static int recv_func_posthandle(struct rtw_adapter *padapter, struct recv_frame *prframe) { int ret = _SUCCESS; struct recv_frame *orig_prframe = prframe; struct recv_priv *precvpriv = &padapter->recvpriv; /* DATA FRAME */ prframe = decryptor(padapter, prframe); if (prframe == NULL) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "decryptor: drop pkt\n"); ret = _FAIL; goto _recv_data_drop; } prframe = recvframe_chk_defrag23a(padapter, prframe); if (!prframe) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "recvframe_chk_defrag23a: drop pkt\n"); goto _recv_data_drop; } /* * Pull off crypto headers */ if (prframe->attrib.iv_len > 0) { skb_pull(prframe->pkt, prframe->attrib.iv_len); } if (prframe->attrib.icv_len > 0) { skb_trim(prframe->pkt, prframe->pkt->len - prframe->attrib.icv_len); } prframe = portctrl(padapter, prframe); if (!prframe) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "portctrl: drop pkt\n"); ret = _FAIL; goto _recv_data_drop; } count_rx_stats(padapter, prframe, NULL); ret = process_recv_indicatepkts(padapter, prframe); if (ret != _SUCCESS) { RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, "recv_func: process_recv_indicatepkts fail!\n"); rtw_free_recvframe23a(orig_prframe);/* free this recv_frame */ goto _recv_data_drop; } return ret; _recv_data_drop: precvpriv->rx_drop++; return ret; } int rtw_recv_entry23a(struct recv_frame *rframe) { int ret, r; struct rtw_adapter *padapter = rframe->adapter; struct rx_pkt_attrib *prxattrib = &rframe->attrib; struct recv_priv *recvpriv = &padapter->recvpriv; struct security_priv *psecuritypriv = &padapter->securitypriv; struct mlme_priv *mlmepriv = &padapter->mlmepriv; /* check if need to handle uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) { struct recv_frame *pending_frame; while ((pending_frame = rtw_alloc_recvframe23a(&padapter->recvpriv.uc_swdec_pending_queue))) { r = 
recv_func_posthandle(padapter, pending_frame); if (r == _SUCCESS) DBG_8723A("%s: dequeue uc_swdec_pending_queue\n", __func__); } } ret = recv_func_prehandle(padapter, rframe); if (ret == _SUCCESS) { /* check if need to enqueue into uc_swdec_pending_queue*/ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && !is_multicast_ether_addr(prxattrib->ra) && prxattrib->encrypt > 0 && (prxattrib->bdecrypted == 0) && !is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) && !psecuritypriv->busetkipkey) { rtw_enqueue_recvframe23a(rframe, &padapter->recvpriv.uc_swdec_pending_queue); DBG_8723A("%s: no key, enqueue uc_swdec_pending_queue\n", __func__); goto exit; } ret = recv_func_posthandle(padapter, rframe); recvpriv->rx_pkts++; } exit: return ret; } void rtw_signal_stat_timer_hdl23a(unsigned long data) { struct rtw_adapter *adapter = (struct rtw_adapter *)data; struct recv_priv *recvpriv = &adapter->recvpriv; u32 tmp_s, tmp_q; u8 avg_signal_strength = 0; u8 avg_signal_qual = 0; u32 num_signal_strength = 0; u32 num_signal_qual = 0; u8 _alpha = 3; /* this value is based on converging_constant = 5000 */ /* and sampling_interval = 1000 */ if (recvpriv->signal_strength_data.update_req == 0) { /* update_req is clear, means we got rx */ avg_signal_strength = recvpriv->signal_strength_data.avg_val; num_signal_strength = recvpriv->signal_strength_data.total_num; /* after avg_vals are acquired, we can re-stat */ /* the signal values */ recvpriv->signal_strength_data.update_req = 1; } if (recvpriv->signal_qual_data.update_req == 0) { /* update_req is clear, means we got rx */ avg_signal_qual = recvpriv->signal_qual_data.avg_val; num_signal_qual = recvpriv->signal_qual_data.total_num; /* after avg_vals are acquired, we can re-stat */ /*the signal values */ recvpriv->signal_qual_data.update_req = 1; } /* update value of signal_strength, rssi, signal_qual */ if (!check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY)) { tmp_s = avg_signal_strength + (_alpha - 1) * recvpriv->signal_strength; if (tmp_s 
%_alpha) tmp_s = tmp_s / _alpha + 1; else tmp_s = tmp_s / _alpha; if (tmp_s > 100) tmp_s = 100; tmp_q = avg_signal_qual + (_alpha - 1) * recvpriv->signal_qual; if (tmp_q %_alpha) tmp_q = tmp_q / _alpha + 1; else tmp_q = tmp_q / _alpha; if (tmp_q > 100) tmp_q = 100; recvpriv->signal_strength = tmp_s; recvpriv->signal_qual = tmp_q; DBG_8723A("%s signal_strength:%3u, signal_qual:%3u, " "num_signal_strength:%u, num_signal_qual:%u\n", __func__, recvpriv->signal_strength, recvpriv->signal_qual, num_signal_strength, num_signal_qual); } rtw_set_signal_stat_timer(recvpriv); }
gpl-2.0
hrkfdn/linux-htc-ace
fs/nfsd/nfs4callback.c
476
19983
/* * linux/fs/nfsd/nfs4callback.c * * Copyright (c) 2001 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <andros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/module.h> #include <linux/list.h> #include <linux/inet.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svcsock.h> #include <linux/nfsd/nfsd.h> #include <linux/nfsd/state.h> #include <linux/sunrpc/sched.h> #include <linux/nfs4.h> #include <linux/sunrpc/xprtsock.h> #define NFSDDBG_FACILITY NFSDDBG_PROC #define NFSPROC4_CB_NULL 0 #define NFSPROC4_CB_COMPOUND 1 #define NFS4_STATEID_SIZE 16 /* Index of predefined Linux callback client operations */ enum { NFSPROC4_CLNT_CB_NULL = 0, NFSPROC4_CLNT_CB_RECALL, NFSPROC4_CLNT_CB_SEQUENCE, }; enum nfs_cb_opnum4 { OP_CB_RECALL = 4, OP_CB_SEQUENCE = 11, }; #define NFS4_MAXTAGLEN 20 #define NFS4_enc_cb_null_sz 0 #define NFS4_dec_cb_null_sz 0 #define cb_compound_enc_hdr_sz 4 #define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2)) #define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2) #define cb_sequence_enc_sz (sessionid_sz + 4 + \ 1 /* no referring calls list yet */) #define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4) #define op_enc_sz 1 #define op_dec_sz 2 #define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2)) #define enc_stateid_sz (NFS4_STATEID_SIZE >> 2) #define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \ cb_sequence_enc_sz + \ 1 + enc_stateid_sz + \ enc_nfs4_fh_sz) #define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \ cb_sequence_dec_sz + \ op_dec_sz) struct nfs4_rpc_args { void *args_op; struct nfsd4_cb_sequence args_seq; }; /* * Generic encode routines from fs/nfs/nfs4xdr.c */ static inline __be32 * xdr_writemem(__be32 *p, const void *ptr, int nbytes) { int tmp = XDR_QUADLEN(nbytes); if (!tmp) return p; p[tmp-1] = 0; memcpy(p, ptr, nbytes); return p + tmp; } #define WRITE32(n) *p++ = htonl(n) #define WRITEMEM(ptr,nbytes) do { \ p = xdr_writemem(p, ptr, nbytes); \ } while (0) #define RESERVE_SPACE(nbytes) do { \ p = 
xdr_reserve_space(xdr, nbytes); \ if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \ BUG_ON(!p); \ } while (0) /* * Generic decode routines from fs/nfs/nfs4xdr.c */ #define DECODE_TAIL \ status = 0; \ out: \ return status; \ xdr_error: \ dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \ status = -EIO; \ goto out #define READ32(x) (x) = ntohl(*p++) #define READ64(x) do { \ (x) = (u64)ntohl(*p++) << 32; \ (x) |= ntohl(*p++); \ } while (0) #define READTIME(x) do { \ p++; \ (x.tv_sec) = ntohl(*p++); \ (x.tv_nsec) = ntohl(*p++); \ } while (0) #define READ_BUF(nbytes) do { \ p = xdr_inline_decode(xdr, nbytes); \ if (!p) { \ dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \ __func__, __LINE__); \ return -EIO; \ } \ } while (0) struct nfs4_cb_compound_hdr { /* args */ u32 ident; /* minorversion 0 only */ u32 nops; __be32 *nops_p; u32 minorversion; /* res */ int status; u32 taglen; char *tag; }; static struct { int stat; int errno; } nfs_cb_errtbl[] = { { NFS4_OK, 0 }, { NFS4ERR_PERM, EPERM }, { NFS4ERR_NOENT, ENOENT }, { NFS4ERR_IO, EIO }, { NFS4ERR_NXIO, ENXIO }, { NFS4ERR_ACCESS, EACCES }, { NFS4ERR_EXIST, EEXIST }, { NFS4ERR_XDEV, EXDEV }, { NFS4ERR_NOTDIR, ENOTDIR }, { NFS4ERR_ISDIR, EISDIR }, { NFS4ERR_INVAL, EINVAL }, { NFS4ERR_FBIG, EFBIG }, { NFS4ERR_NOSPC, ENOSPC }, { NFS4ERR_ROFS, EROFS }, { NFS4ERR_MLINK, EMLINK }, { NFS4ERR_NAMETOOLONG, ENAMETOOLONG }, { NFS4ERR_NOTEMPTY, ENOTEMPTY }, { NFS4ERR_DQUOT, EDQUOT }, { NFS4ERR_STALE, ESTALE }, { NFS4ERR_BADHANDLE, EBADHANDLE }, { NFS4ERR_BAD_COOKIE, EBADCOOKIE }, { NFS4ERR_NOTSUPP, ENOTSUPP }, { NFS4ERR_TOOSMALL, ETOOSMALL }, { NFS4ERR_SERVERFAULT, ESERVERFAULT }, { NFS4ERR_BADTYPE, EBADTYPE }, { NFS4ERR_LOCKED, EAGAIN }, { NFS4ERR_RESOURCE, EREMOTEIO }, { NFS4ERR_SYMLINK, ELOOP }, { NFS4ERR_OP_ILLEGAL, EOPNOTSUPP }, { NFS4ERR_DEADLOCK, EDEADLK }, { -1, EIO } }; static int nfs_cb_stat_to_errno(int stat) { int i; for (i = 0; nfs_cb_errtbl[i].stat != 
-1; i++) { if (nfs_cb_errtbl[i].stat == stat) return nfs_cb_errtbl[i].errno; } /* If we cannot translate the error, the recovery routines should * handle it. * Note: remaining NFSv4 error codes have values > 10000, so should * not conflict with native Linux error codes. */ return stat; } /* * XDR encode */ static void encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr) { __be32 * p; RESERVE_SPACE(16); WRITE32(0); /* tag length is always 0 */ WRITE32(hdr->minorversion); WRITE32(hdr->ident); hdr->nops_p = p; WRITE32(hdr->nops); } static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr) { *hdr->nops_p = htonl(hdr->nops); } static void encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp, struct nfs4_cb_compound_hdr *hdr) { __be32 *p; int len = dp->dl_fh.fh_size; RESERVE_SPACE(12+sizeof(dp->dl_stateid) + len); WRITE32(OP_CB_RECALL); WRITE32(dp->dl_stateid.si_generation); WRITEMEM(&dp->dl_stateid.si_opaque, sizeof(stateid_opaque_t)); WRITE32(0); /* truncate optimization not implemented */ WRITE32(len); WRITEMEM(&dp->dl_fh.fh_base, len); hdr->nops++; } static void encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args, struct nfs4_cb_compound_hdr *hdr) { __be32 *p; if (hdr->minorversion == 0) return; RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20); WRITE32(OP_CB_SEQUENCE); WRITEMEM(args->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN); WRITE32(args->cbs_clp->cl_cb_seq_nr); WRITE32(0); /* slotid, always 0 */ WRITE32(0); /* highest slotid always 0 */ WRITE32(0); /* cachethis always 0 */ WRITE32(0); /* FIXME: support referring_call_lists */ hdr->nops++; } static int nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p) { struct xdr_stream xdrs, *xdr = &xdrs; xdr_init_encode(&xdrs, &req->rq_snd_buf, p); RESERVE_SPACE(0); return 0; } static int nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_rpc_args *rpc_args) { struct xdr_stream xdr; struct nfs4_delegation *args = rpc_args->args_op; 
struct nfs4_cb_compound_hdr hdr = { .ident = args->dl_ident, .minorversion = rpc_args->args_seq.cbs_minorversion, }; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_cb_compound_hdr(&xdr, &hdr); encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr); encode_cb_recall(&xdr, args, &hdr); encode_cb_nops(&hdr); return 0; } static int decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){ __be32 *p; READ_BUF(8); READ32(hdr->status); READ32(hdr->taglen); READ_BUF(hdr->taglen + 4); hdr->tag = (char *)p; p += XDR_QUADLEN(hdr->taglen); READ32(hdr->nops); return 0; } static int decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) { __be32 *p; u32 op; int32_t nfserr; READ_BUF(8); READ32(op); if (op != expected) { dprintk("NFSD: decode_cb_op_hdr: Callback server returned " " operation %d but we issued a request for %d\n", op, expected); return -EIO; } READ32(nfserr); if (nfserr != NFS_OK) return -nfs_cb_stat_to_errno(nfserr); return 0; } /* * Our current back channel implmentation supports a single backchannel * with a single slot. */ static int decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res, struct rpc_rqst *rqstp) { struct nfs4_sessionid id; int status; u32 dummy; __be32 *p; if (res->cbs_minorversion == 0) return 0; status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE); if (status) return status; /* * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes. 
*/ status = -ESERVERFAULT; READ_BUF(NFS4_MAX_SESSIONID_LEN + 16); memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN); if (memcmp(id.data, res->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN)) { dprintk("%s Invalid session id\n", __func__); goto out; } READ32(dummy); if (dummy != res->cbs_clp->cl_cb_seq_nr) { dprintk("%s Invalid sequence number\n", __func__); goto out; } READ32(dummy); /* slotid must be 0 */ if (dummy != 0) { dprintk("%s Invalid slotid\n", __func__); goto out; } /* FIXME: process highest slotid and target highest slotid */ status = 0; out: return status; } static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p) { return 0; } static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p, struct nfsd4_cb_sequence *seq) { struct xdr_stream xdr; struct nfs4_cb_compound_hdr hdr; int status; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); status = decode_cb_compound_hdr(&xdr, &hdr); if (status) goto out; if (seq) { status = decode_cb_sequence(&xdr, seq, rqstp); if (status) goto out; } status = decode_cb_op_hdr(&xdr, OP_CB_RECALL); out: return status; } /* * RPC procedure tables */ #define PROC(proc, call, argtype, restype) \ [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_CB_##call, \ .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ .p_arglen = NFS4_##argtype##_sz, \ .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CB_##call, \ .p_name = #proc, \ } static struct rpc_procinfo nfs4_cb_procedures[] = { PROC(CB_NULL, NULL, enc_cb_null, dec_cb_null), PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall), }; static struct rpc_version nfs_cb_version4 = { .number = 1, .nrprocs = ARRAY_SIZE(nfs4_cb_procedures), .procs = nfs4_cb_procedures }; static struct rpc_version * nfs_cb_version[] = { NULL, &nfs_cb_version4, }; static struct rpc_program cb_program; static struct rpc_stat cb_stats = { .program = &cb_program }; #define NFS4_CALLBACK 0x40000000 static struct 
rpc_program cb_program = { .name = "nfs4_cb", .number = NFS4_CALLBACK, .nrvers = ARRAY_SIZE(nfs_cb_version), .version = nfs_cb_version, .stats = &cb_stats, .pipe_dir_name = "/nfsd4_cb", }; static int max_cb_time(void) { return max(NFSD_LEASE_TIME/10, (time_t)1) * HZ; } /* Reference counting, callback cleanup, etc., all look racy as heck. * And why is cb_set an atomic? */ int setup_callback_client(struct nfs4_client *clp) { struct nfs4_cb_conn *cb = &clp->cl_cb_conn; struct rpc_timeout timeparms = { .to_initval = max_cb_time(), .to_retries = 0, }; struct rpc_create_args args = { .protocol = XPRT_TRANSPORT_TCP, .address = (struct sockaddr *) &cb->cb_addr, .addrsize = cb->cb_addrlen, .timeout = &timeparms, .program = &cb_program, .prognumber = cb->cb_prog, .version = nfs_cb_version[1]->number, .authflavor = clp->cl_flavor, .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), .client_name = clp->cl_principal, }; struct rpc_clnt *client; if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) return -EINVAL; if (cb->cb_minorversion) { args.bc_xprt = clp->cl_cb_xprt; args.protocol = XPRT_TRANSPORT_BC_TCP; } /* Create RPC client */ client = rpc_create(&args); if (IS_ERR(client)) { dprintk("NFSD: couldn't create callback client: %ld\n", PTR_ERR(client)); return PTR_ERR(client); } cb->cb_client = client; return 0; } static void warn_no_callback_path(struct nfs4_client *clp, int reason) { dprintk("NFSD: warning: no callback path to client %.*s: error %d\n", (int)clp->cl_name.len, clp->cl_name.data, reason); } static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata) { struct nfs4_client *clp = calldata; if (task->tk_status) warn_no_callback_path(clp, task->tk_status); else atomic_set(&clp->cl_cb_conn.cb_set, 1); put_nfs4_client(clp); } static const struct rpc_call_ops nfsd4_cb_probe_ops = { .rpc_call_done = nfsd4_cb_probe_done, }; static struct rpc_cred *callback_cred; int set_callback_cred(void) { callback_cred = rpc_lookup_machine_cred(); if 
(!callback_cred) return -ENOMEM; return 0; } void do_probe_callback(struct nfs4_client *clp) { struct nfs4_cb_conn *cb = &clp->cl_cb_conn; struct rpc_message msg = { .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], .rpc_argp = clp, .rpc_cred = callback_cred }; int status; status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT, &nfsd4_cb_probe_ops, (void *)clp); if (status) { warn_no_callback_path(clp, status); put_nfs4_client(clp); } } /* * Set up the callback client and put a NFSPROC4_CB_NULL on the wire... */ void nfsd4_probe_callback(struct nfs4_client *clp) { int status; BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set)); status = setup_callback_client(clp); if (status) { warn_no_callback_path(clp, status); return; } /* the task holds a reference to the nfs4_client struct */ atomic_inc(&clp->cl_count); do_probe_callback(clp); } /* * There's currently a single callback channel slot. * If the slot is available, then mark it busy. Otherwise, set the * thread for sleeping on the callback RPC wait queue. */ static int nfsd41_cb_setup_sequence(struct nfs4_client *clp, struct rpc_task *task) { struct nfs4_rpc_args *args = task->tk_msg.rpc_argp; u32 *ptr = (u32 *)clp->cl_sessionid.data; int status = 0; dprintk("%s: %u:%u:%u:%u\n", __func__, ptr[0], ptr[1], ptr[2], ptr[3]); if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); dprintk("%s slot is busy\n", __func__); status = -EAGAIN; goto out; } /* * We'll need the clp during XDR encoding and decoding, * and the sequence during decoding to verify the reply */ args->args_seq.cbs_clp = clp; task->tk_msg.rpc_resp = &args->args_seq; out: dprintk("%s status=%d\n", __func__, status); return status; } /* * TODO: cb_sequence should support referring call lists, cachethis, multiple * slots, and mark callback channel down on communication errors. 
*/ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) { struct nfs4_delegation *dp = calldata; struct nfs4_client *clp = dp->dl_client; struct nfs4_rpc_args *args = task->tk_msg.rpc_argp; u32 minorversion = clp->cl_cb_conn.cb_minorversion; int status = 0; args->args_seq.cbs_minorversion = minorversion; if (minorversion) { status = nfsd41_cb_setup_sequence(clp, task); if (status) { if (status != -EAGAIN) { /* terminate rpc task */ task->tk_status = status; task->tk_action = NULL; } return; } } rpc_call_start(task); } static void nfsd4_cb_done(struct rpc_task *task, void *calldata) { struct nfs4_delegation *dp = calldata; struct nfs4_client *clp = dp->dl_client; dprintk("%s: minorversion=%d\n", __func__, clp->cl_cb_conn.cb_minorversion); if (clp->cl_cb_conn.cb_minorversion) { /* No need for lock, access serialized in nfsd4_cb_prepare */ ++clp->cl_cb_seq_nr; clear_bit(0, &clp->cl_cb_slot_busy); rpc_wake_up_next(&clp->cl_cb_waitq); dprintk("%s: freed slot, new seqid=%d\n", __func__, clp->cl_cb_seq_nr); /* We're done looking into the sequence information */ task->tk_msg.rpc_resp = NULL; } } static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) { struct nfs4_delegation *dp = calldata; struct nfs4_client *clp = dp->dl_client; nfsd4_cb_done(task, calldata); switch (task->tk_status) { case -EIO: /* Network partition? 
*/ atomic_set(&clp->cl_cb_conn.cb_set, 0); warn_no_callback_path(clp, task->tk_status); case -EBADHANDLE: case -NFS4ERR_BAD_STATEID: /* Race: client probably got cb_recall * before open reply granting delegation */ break; default: /* success, or error we can't handle */ goto done; } if (dp->dl_retries--) { rpc_delay(task, 2*HZ); task->tk_status = 0; rpc_restart_call(task); return; } else { atomic_set(&clp->cl_cb_conn.cb_set, 0); warn_no_callback_path(clp, task->tk_status); } done: kfree(task->tk_msg.rpc_argp); } static void nfsd4_cb_recall_release(void *calldata) { struct nfs4_delegation *dp = calldata; struct nfs4_client *clp = dp->dl_client; nfs4_put_delegation(dp); put_nfs4_client(clp); } static const struct rpc_call_ops nfsd4_cb_recall_ops = { .rpc_call_prepare = nfsd4_cb_prepare, .rpc_call_done = nfsd4_cb_recall_done, .rpc_release = nfsd4_cb_recall_release, }; /* * called with dp->dl_count inc'ed. */ void nfsd4_cb_recall(struct nfs4_delegation *dp) { struct nfs4_client *clp = dp->dl_client; struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client; struct nfs4_rpc_args *args; struct rpc_message msg = { .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL], .rpc_cred = callback_cred }; int status = -ENOMEM; args = kzalloc(sizeof(*args), GFP_KERNEL); if (!args) goto out; args->args_op = dp; msg.rpc_argp = args; dp->dl_retries = 1; status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &nfsd4_cb_recall_ops, dp); out: if (status) { kfree(args); put_nfs4_client(clp); nfs4_put_delegation(dp); } }
gpl-2.0
zenfone-legacy/android_kernel_asus_T00F
arch/mips/pci/pci.c
2012
8986
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2011 Wind River Systems, * written by Ralf Baechle (ralf@linux-mips.org) */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/export.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/of_address.h> #include <asm/cpu-info.h> /* * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource * assignments. */ /* * The PCI controller list. */ static struct pci_controller *hose_head, **hose_tail = &hose_head; unsigned long PCIBIOS_MIN_IO; unsigned long PCIBIOS_MIN_MEM; static int pci_initialized; /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. 
*/
/*
 * Resource alignment hook for the PCI core.  Pushes the candidate start
 * address above the platform minimum for the owning hose and, for I/O
 * resources, out of the 0x100-0x3ff aliasing window described above.
 */
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = dev->sysdata;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		/* Make sure we start at our min on all hoses */
		if (start < PCIBIOS_MIN_IO + hose->io_resource->start)
			start = PCIBIOS_MIN_IO + hose->io_resource->start;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start)
			start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
	}

	return start;
}

/*
 * Scan one controller (hose).  Uses function-static next_busno /
 * need_domain_info so that successive hoses get consecutive bus number
 * ranges; falls back to per-hose PCI domains once numbers run out.
 */
static void pcibios_scanbus(struct pci_controller *hose)
{
	static int next_busno;
	static int need_domain_info;
	LIST_HEAD(resources);
	struct pci_bus *bus;

	if (!hose->iommu)
		PCI_DMA_BUS_IS_PHYS = 1;

	if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
		next_busno = (*hose->get_busno)();

	pci_add_resource_offset(&resources,
				hose->mem_resource, hose->mem_offset);
	pci_add_resource_offset(&resources,
				hose->io_resource, hose->io_offset);
	bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
				&resources);
	if (!bus)
		/* Scan failed; the core did not take ownership of the list */
		pci_free_resource_list(&resources);

	hose->bus = bus;

	need_domain_info = need_domain_info || hose->index;
	hose->need_domain_info = need_domain_info;
	if (bus) {
		next_busno = bus->busn_res.end + 1;
		/* Don't allow 8-bit bus number overflow inside the hose -
		   reserve some space for bridges. */
		if (next_busno > 224) {
			next_busno = 0;
			need_domain_info = 1;
		}

		if (!pci_has_flag(PCI_PROBE_ONLY)) {
			pci_bus_size_bridges(bus);
			pci_bus_assign_resources(bus);
			pci_enable_bridges(bus);
		}
	}
}

#ifdef CONFIG_OF
/*
 * Parse the device-tree "ranges" property of a PCI host bridge node and
 * fill in the hose's io/mem resources.  Each range entry is np cells:
 * 3 PCI-address cells, pna parent-address cells, 2 size cells.
 */
void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
{
	const __be32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(node);
	int np = pna + 5;

	pr_info("PCI host bridge %s ranges:\n", node->full_name);
	ranges = of_get_property(node, "ranges", &rlen);
	if (ranges == NULL)
		return;
	hose->of_node = node;

	while ((rlen -= np * 4) >= 0) {
		u32 pci_space;
		struct resource *res = NULL;
		u64 addr, size;

		pci_space = be32_to_cpup(&ranges[0]);
		addr = of_translate_address(node, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;
		/* Bits 25:24 of the first cell select the address space */
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* PCI IO space */
			pr_info("  IO 0x%016llx..0x%016llx\n",
				addr, addr + size - 1);
			hose->io_map_base =
				(unsigned long)ioremap(addr, size);
			res = hose->io_resource;
			res->flags = IORESOURCE_IO;
			break;
		case 2:		/* PCI Memory space */
		case 3:		/* PCI 64 bits Memory space */
			pr_info(" MEM 0x%016llx..0x%016llx\n",
				addr, addr + size - 1);
			res = hose->mem_resource;
			res->flags = IORESOURCE_MEM;
			break;
		}
		if (res != NULL) {
			res->start = addr;
			res->name = node->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}

/* Hand the PCI core the OF node backing this bus's controller. */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->of_node);
}
#endif

static DEFINE_MUTEX(pci_scan_mutex);

/*
 * Register a PCI controller: claim its mem and io windows from the
 * global resource trees, append it to the hose list, and — if the PCI
 * subsystem already finished pcibios_init() — scan it immediately.
 * On a resource conflict the controller is skipped entirely.
 */
void register_pci_controller(struct pci_controller *hose)
{
	struct resource *parent;

	parent = hose->mem_resource->parent;
	if (!parent)
		parent = &iomem_resource;

	if (request_resource(parent, hose->mem_resource) < 0)
		goto out;

	parent = hose->io_resource->parent;
	if (!parent)
		parent = &ioport_resource;

	if (request_resource(parent, hose->io_resource) < 0) {
		/* Undo the mem claim so nothing is left half-registered */
		release_resource(hose->mem_resource);
		goto out;
	}

	*hose_tail = hose;
	hose_tail = &hose->next;

	/*
	 * Do not panic here but later - this might happen before console init.
	 */
	if (!hose->io_map_base) {
		printk(KERN_WARNING
		       "registering PCI controller with io_map_base unset\n");
	}

	/*
	 * Scan the bus if it is register after the PCI subsystem
	 * initialization.
	 */
	if (pci_initialized) {
		mutex_lock(&pci_scan_mutex);
		pcibios_scanbus(hose);
		mutex_unlock(&pci_scan_mutex);
	}

	return;

out:
	printk(KERN_WARNING
	       "Skipping PCI bus scan due to resource conflict\n");
}

/*
 * Set the default PCI cache line size from the largest cache level
 * present (d-cache, overridden by s-cache, overridden by t-cache).
 * pci_dfl_cache_line_size is in 32-bit words, hence the >> 2.
 */
static void __init pcibios_set_cache_line_size(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int lsize;

	/*
	 * Set PCI cacheline size to that of the highest level in the
	 * cache hierarchy.
	 */
	lsize = c->dcache.linesz;
	lsize = c->scache.linesz ? : lsize;
	lsize = c->tcache.linesz ? : lsize;

	BUG_ON(!lsize);

	pci_dfl_cache_line_size = lsize >> 2;

	pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
}

/*
 * Late-init entry point: scan every controller registered so far and
 * fix up interrupt routing; controllers registered afterwards are
 * scanned directly from register_pci_controller().
 */
static int __init pcibios_init(void)
{
	struct pci_controller *hose;

	pcibios_set_cache_line_size();

	/* Scan all of the recorded PCI controllers.  */
	for (hose = hose_head; hose; hose = hose->next)
		pcibios_scanbus(hose);

	pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq);

	pci_initialized = 1;

	return 0;
}

subsys_initcall(pcibios_init);

/*
 * Enable the IO/MEM decode bits in PCI_COMMAND for every resource of
 * @dev selected by @mask; fails with -EINVAL if any selected resource
 * was left unassigned (start == 0 with a nonzero end).
 */
static int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1<<idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		if ((idx == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR "PCI: Device %s not available "
			       "because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/* Always let the core renumber buses on this platform. */
unsigned int pcibios_assign_all_busses(void)
{
	return 1;
}

/* Enable decode for @dev, then run the platform-specific device init. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err;

	if ((err = pcibios_enable_resources(dev, mask)) < 0)
		return err;

	return pcibios_plat_dev_init(dev);
}

/*
 * Bus fixup: in PROBE_ONLY mode, read the (firmware-assigned) bridge
 * windows instead of reassigning them.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;

	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_read_bridge_bases(bus);
	}
}

EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);

/*
 * mmap a PCI resource into userspace.  Only memory resources are
 * supported, and the mapping is always uncached (write_combine is
 * deliberately ignored for now).
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/*
	 * I/O space can be accessed via normal processor loads and stores on
	 * this platform but for now we elect not to do this and portable
	 * drivers should not do this anyway.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	/*
	 * Ignore write-combine; for now only return uncached mappings.
	 */
	prot = pgprot_val(vma->vm_page_prot);
	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
	vma->vm_page_prot = __pgprot(prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
		vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

/* Optional platform hook for "pci=..." command line parsing. */
char * (*pcibios_plat_setup)(char *str) __initdata;

/* Forward command-line options to the platform hook, if any. */
char *__init pcibios_setup(char *str)
{
	if (pcibios_plat_setup)
		return pcibios_plat_setup(str);
	return str;
}
gpl-2.0
htc-mirror/evita-ics-crc-3.0.8-271616b
fs/jffs2/fs.c
2268
20346
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

/*
 * Apply attribute changes to an inode by writing a fresh metadata node
 * to flash (every setattr produces a new raw inode node, version-bumped
 * so older nodes become obsolete).
 */
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		/* NOTE(review): other paths test S_ISLNK(inode->i_mode)
		 * without the & S_IFMT — equivalent, but inconsistent */
		if (S_ISLNK(inode->i_mode & S_IFMT))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	/* Header CRC covers the common node header minus its own field */
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);

	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_gid = je16_to_cpu(ri->gid);

	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* Extension: the hole node becomes a data node, not the
		   inode's metadata node */
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

/* VFS ->setattr: validate, apply, then propagate mode changes to ACLs. */
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int rc;

	rc = inode_change_ok(dentry->d_inode, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(dentry->d_inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = jffs2_acl_chmod(dentry->d_inode);

	return rc;
}

/*
 * VFS ->statfs.  Available space is dirty+free minus the blocks
 * reserved for writes (GC head-room), clamped at zero.
 */
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}


/* VFS ->evict_inode: tear down the in-core JFFS2 node lists. */
void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	jffs2_do_clear_inode(c, f);
}

/*
 * Look up or read an inode.  A freshly-created inode is populated from
 * the newest raw inode node on flash and given its type-specific ops.
 */
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino));

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);

	if (ret) {
		mutex_unlock(&f->sem);
		iget_failed(inode);
		return ERR_PTR(ret);
	}
	inode->i_mode = jemode_to_cpu(latest_node.mode);
	inode->i_uid = je16_to_cpu(latest_node.uid);
	inode->i_gid = je16_to_cpu(latest_node.gid);
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	inode->i_nlink = f->inocache->pino_nlink;

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		inode->i_nlink = 2; /* parent and '.' */

		/* Each child directory contributes a link via its '..' */
		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
			goto error_io;
		}
		D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
		/* NOTE(review): falls through to the special-inode setup
		 * below with the decoded rdev — appears intentional */

	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	jffs2_do_clear_inode(c, f);
	iget_failed(inode);
	return ERR_PTR(ret);
}

/*
 * VFS ->dirty_inode: persist attribute changes made behind the
 * filesystem's back (only for I_DIRTY_DATASYNC) by re-running setattr
 * with the inode's current attributes.
 */
void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
		return;
	}

	D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

/*
 * VFS ->remount_fs: handle ro<->rw transitions by stopping/starting the
 * garbage-collector thread and flushing the write buffer.
 */
int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(current_fsuid());

	if (dir_i->i_mode & S_ISGID) {
		/* Inherit group; directories also inherit the setgid bit */
		ri->gid = cpu_to_je16(dir_i->i_gid);
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(current_fsgid());
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	inode->i_nlink = 1;
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_gid = je16_to_cpu(ri->gid);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		make_bad_inode(inode);
		unlock_new_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick a inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but rounding down to the usual big powers of 2. And keep
	 * to sensible bounds.
	 */
	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

/*
 * Fill in a superblock: validate flash geometry, set up the flash layer
 * and caches, mount the filesystem and instantiate the root dentry.
 */
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		D1(printk(KERN_WARNING "get root inode failed\n"));
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
	sb->s_root = d_alloc_root(root_i);
	if (!sb->s_root)
		goto out_root_i;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root_i:
	iput(root_i);
out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
out_inohash:
	jffs2_clear_xattr_subsystem(c);
	kfree(c->inocache_list);
out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

/* GC helper: drop the inode reference taken by jffs2_gc_fetch_inode(). */
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

/*
 * GC helper: obtain the in-core inode for @inum.  For unlinked inodes,
 * only an already-present inode may be used (ilookup, never iget), to
 * avoid re-reading a dead inode from flash.
 */
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode. */
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum));

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state));
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. unlinked %d\n",
		       inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

/*
 * GC helper: map the page cache page covering @offset of the inode and
 * return a kernel pointer to it; *priv carries the page for release.
 */
unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
			     (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;
	*priv = (unsigned long)pg;
	return kmap(pg);
}

/* GC helper: undo jffs2_gc_fetch_page() (kunmap + drop page ref). */
void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	page_cache_release(pg);
}

/* Initialise the flash-type-specific write machinery for this medium. */
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and an UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

/* Tear down whatever jffs2_flash_setup() established. */
void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and an UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}
gpl-2.0
dtsd/zte_blade_s6_lollipop_kernel
drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
2268
2539
/* * Copyright (C) 2010 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <subdev/fb.h> struct nv46_fb_priv { struct nouveau_fb base; }; void nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { /* for performance, select alternate bank offset for zeta */ if (!(flags & 4)) tile->addr = (0 << 3); else tile->addr = (1 << 3); tile->addr |= 0x00000001; /* mode = vram */ tile->addr |= addr; tile->limit = max(1u, addr + size) - 1; tile->pitch = pitch; } static int nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv46_fb_priv *priv; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; priv->base.memtype_valid = nv04_fb_memtype_valid; priv->base.ram.init = nv44_fb_vram_init; priv->base.tile.regions = 15; priv->base.tile.init = nv46_fb_tile_init; priv->base.tile.fini = nv20_fb_tile_fini; priv->base.tile.prog = nv44_fb_tile_prog; return nouveau_fb_preinit(&priv->base); } struct nouveau_oclass nv46_fb_oclass = { .handle = NV_SUBDEV(FB, 0x46), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv46_fb_ctor, .dtor = _nouveau_fb_dtor, .init = nv44_fb_init, .fini = _nouveau_fb_fini, }, };
gpl-2.0
gundal/zerofltetmo
drivers/media/usb/gspca/gl860/gl860.c
2268
19122
/* GSPCA subdrivers for Genesys Logic webcams with the GL860 chip * Subdriver core * * 2009/09/24 Olivier Lorin <o.lorin@laposte.net> * GSPCA by Jean-Francois Moine <http://moinejf.free.fr> * Thanks BUGabundo and Malmostoso for your amazing help! * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "gspca.h" #include "gl860.h" MODULE_AUTHOR("Olivier Lorin <o.lorin@laposte.net>"); MODULE_DESCRIPTION("Genesys Logic USB PC Camera Driver"); MODULE_LICENSE("GPL"); /*======================== static function declarations ====================*/ static void (*dev_init_settings)(struct gspca_dev *gspca_dev); static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id); static int sd_init(struct gspca_dev *gspca_dev); static int sd_isoc_init(struct gspca_dev *gspca_dev); static int sd_start(struct gspca_dev *gspca_dev); static void sd_stop0(struct gspca_dev *gspca_dev); static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len); static void sd_callback(struct gspca_dev *gspca_dev); static int gl860_guess_sensor(struct gspca_dev *gspca_dev, u16 vendor_id, u16 product_id); /*============================ driver options ==============================*/ static s32 AC50Hz = 0xff; module_param(AC50Hz, int, 0644); MODULE_PARM_DESC(AC50Hz, " Does AC power frequency is 50Hz? 
(0/1)"); static char sensor[7]; module_param_string(sensor, sensor, sizeof(sensor), 0644); MODULE_PARM_DESC(sensor, " Driver sensor ('MI1320'/'MI2020'/'OV9655'/'OV2640')"); /*============================ webcam controls =============================*/ static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *) gspca_dev; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: sd->vcur.brightness = ctrl->val; break; case V4L2_CID_CONTRAST: sd->vcur.contrast = ctrl->val; break; case V4L2_CID_SATURATION: sd->vcur.saturation = ctrl->val; break; case V4L2_CID_HUE: sd->vcur.hue = ctrl->val; break; case V4L2_CID_GAMMA: sd->vcur.gamma = ctrl->val; break; case V4L2_CID_HFLIP: sd->vcur.mirror = ctrl->val; break; case V4L2_CID_VFLIP: sd->vcur.flip = ctrl->val; break; case V4L2_CID_POWER_LINE_FREQUENCY: sd->vcur.AC50Hz = ctrl->val; break; case V4L2_CID_WHITE_BALANCE_TEMPERATURE: sd->vcur.whitebal = ctrl->val; break; case V4L2_CID_SHARPNESS: sd->vcur.sharpness = ctrl->val; break; case V4L2_CID_BACKLIGHT_COMPENSATION: sd->vcur.backlight = ctrl->val; break; default: return -EINVAL; } if (gspca_dev->streaming) sd->waitSet = 1; return 0; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 11); if (sd->vmax.brightness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, sd->vmax.brightness, 1, sd->vcur.brightness); if (sd->vmax.contrast) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, sd->vmax.contrast, 1, sd->vcur.contrast); if (sd->vmax.saturation) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, sd->vmax.saturation, 1, sd->vcur.saturation); if (sd->vmax.hue) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HUE, 0, 
sd->vmax.hue, 1, sd->vcur.hue); if (sd->vmax.gamma) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAMMA, 0, sd->vmax.gamma, 1, sd->vcur.gamma); if (sd->vmax.mirror) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, sd->vmax.mirror, 1, sd->vcur.mirror); if (sd->vmax.flip) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, sd->vmax.flip, 1, sd->vcur.flip); if (sd->vmax.AC50Hz) v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, sd->vmax.AC50Hz, 0, sd->vcur.AC50Hz); if (sd->vmax.whitebal) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_WHITE_BALANCE_TEMPERATURE, 0, sd->vmax.whitebal, 1, sd->vcur.whitebal); if (sd->vmax.sharpness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, sd->vmax.sharpness, 1, sd->vcur.sharpness); if (sd->vmax.backlight) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, 0, sd->vmax.backlight, 1, sd->vcur.backlight); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /*==================== sud-driver structure initialisation =================*/ static const struct sd_desc sd_desc_mi1320 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; static const struct sd_desc sd_desc_mi2020 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; static const struct sd_desc sd_desc_ov2640 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; static const struct sd_desc sd_desc_ov9655 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = 
sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_callback, }; /*=========================== sub-driver image sizes =======================*/ static struct v4l2_pix_format mi2020_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, { 800, 598, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 800, .sizeimage = 800 * 598, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, {1280, 1024, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 1024, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 }, {1600, 1198, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1600, .sizeimage = 1600 * 1198, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3 }, }; static struct v4l2_pix_format ov2640_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, { 800, 600, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 800, .sizeimage = 800 * 600, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, {1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 }, {1600, 1200, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1600, .sizeimage = 1600 * 1200, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3 }, }; static struct v4l2_pix_format mi1320_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, { 800, 600, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 800, .sizeimage = 800 * 600, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, {1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2 }, }; static struct 
v4l2_pix_format ov9655_mode[] = { { 640, 480, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0 }, {1280, 960, V4L2_PIX_FMT_SGBRG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 960, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1 }, }; /*========================= sud-driver functions ===========================*/ /* This function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; u16 vendor_id, product_id; /* Get USB VendorID and ProductID */ vendor_id = id->idVendor; product_id = id->idProduct; sd->nbRightUp = 1; sd->nbIm = -1; sd->sensor = 0xff; if (strcmp(sensor, "MI1320") == 0) sd->sensor = ID_MI1320; else if (strcmp(sensor, "OV2640") == 0) sd->sensor = ID_OV2640; else if (strcmp(sensor, "OV9655") == 0) sd->sensor = ID_OV9655; else if (strcmp(sensor, "MI2020") == 0) sd->sensor = ID_MI2020; /* Get sensor and set the suitable init/start/../stop functions */ if (gl860_guess_sensor(gspca_dev, vendor_id, product_id) == -1) return -1; cam = &gspca_dev->cam; switch (sd->sensor) { case ID_MI1320: gspca_dev->sd_desc = &sd_desc_mi1320; cam->cam_mode = mi1320_mode; cam->nmodes = ARRAY_SIZE(mi1320_mode); dev_init_settings = mi1320_init_settings; break; case ID_MI2020: gspca_dev->sd_desc = &sd_desc_mi2020; cam->cam_mode = mi2020_mode; cam->nmodes = ARRAY_SIZE(mi2020_mode); dev_init_settings = mi2020_init_settings; break; case ID_OV2640: gspca_dev->sd_desc = &sd_desc_ov2640; cam->cam_mode = ov2640_mode; cam->nmodes = ARRAY_SIZE(ov2640_mode); dev_init_settings = ov2640_init_settings; break; case ID_OV9655: gspca_dev->sd_desc = &sd_desc_ov9655; cam->cam_mode = ov9655_mode; cam->nmodes = ARRAY_SIZE(ov9655_mode); dev_init_settings = ov9655_init_settings; break; } dev_init_settings(gspca_dev); if (AC50Hz != 0xff) ((struct sd *) gspca_dev)->vcur.AC50Hz = AC50Hz; return 0; } 
/* This function is called at probe time after sd_config */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return sd->dev_init_at_startup(gspca_dev); } /* This function is called before to choose the alt setting */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return sd->dev_configure_alt(gspca_dev); } /* This function is called to start the webcam */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return sd->dev_init_pre_alt(gspca_dev); } /* This function is called to stop the webcam */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!sd->gspca_dev.present) return; return sd->dev_post_unset_alt(gspca_dev); } /* This function is called when an image is being received */ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; static s32 nSkipped; s32 mode = (s32) gspca_dev->curr_mode; s32 nToSkip = sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1); /* Test only against 0202h, so endianess does not matter */ switch (*(s16 *) data) { case 0x0202: /* End of frame, start a new one */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); nSkipped = 0; if (sd->nbIm >= 0 && sd->nbIm < 10) sd->nbIm++; gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); break; default: data += 2; len -= 2; if (nSkipped + len <= nToSkip) nSkipped += len; else { if (nSkipped < nToSkip && nSkipped + len > nToSkip) { data += nToSkip - nSkipped; len -= nToSkip - nSkipped; nSkipped = nToSkip + 1; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } break; } } /* This function is called when an image has been read */ /* This function is used to monitor webcam orientation */ static void sd_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!_OV9655_) { u8 state; u8 upsideDown; /* Probe sensor orientation */ 
ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0000, 1, (void *)&state); /* C8/40 means upside-down (looking backwards) */ /* D8/50 means right-up (looking onwards) */ upsideDown = (state == 0xc8 || state == 0x40); if (upsideDown && sd->nbRightUp > -4) { if (sd->nbRightUp > 0) sd->nbRightUp = 0; if (sd->nbRightUp == -3) { sd->mirrorMask = 1; sd->waitSet = 1; } sd->nbRightUp--; } if (!upsideDown && sd->nbRightUp < 4) { if (sd->nbRightUp < 0) sd->nbRightUp = 0; if (sd->nbRightUp == 3) { sd->mirrorMask = 0; sd->waitSet = 1; } sd->nbRightUp++; } } if (sd->waitSet) sd->dev_camera_settings(gspca_dev); } /*=================== USB driver structure initialisation ==================*/ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x05e3, 0x0503)}, {USB_DEVICE(0x05e3, 0xf191)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc_mi1320, sizeof(struct sd), THIS_MODULE); } static void sd_disconnect(struct usb_interface *intf) { gspca_disconnect(intf); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = sd_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; /*====================== Init and Exit module functions ====================*/ module_usb_driver(sd_driver); /*==========================================================================*/ int gl860_RTx(struct gspca_dev *gspca_dev, unsigned char pref, u32 req, u16 val, u16 index, s32 len, void *pdata) { struct usb_device *udev = gspca_dev->dev; s32 r = 0; if (pref == 0x40) { /* Send */ if (len > 0) { memcpy(gspca_dev->usb_buf, pdata, len); r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, pref, val, index, gspca_dev->usb_buf, len, 400 + 200 * (len > 1)); } else { r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, pref, val, index, NULL, len, 400); } } 
else { /* Receive */ if (len > 0) { r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req, pref, val, index, gspca_dev->usb_buf, len, 400 + 200 * (len > 1)); memcpy(pdata, gspca_dev->usb_buf, len); } else { r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), req, pref, val, index, NULL, len, 400); } } if (r < 0) pr_err("ctrl transfer failed %4d [p%02x r%d v%04x i%04x len%d]\n", r, pref, req, val, index, len); else if (len > 1 && r < len) PERR("short ctrl transfer %d/%d", r, len); msleep(1); return r; } int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len) { int n; for (n = 0; n < len; n++) { if (tbl[n].idx != 0xffff) ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx, 0, NULL); else if (tbl[n].val == 0xffff) break; else msleep(tbl[n].val); } return n; } int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len, int n) { while (++n < len) { if (tbl[n].idx != 0xffff) ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx, 0, NULL); else if (tbl[n].val == 0xffff) break; else msleep(tbl[n].val); } return n; } void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len) { int n; for (n = 0; n < len; n++) { if (memcmp(tbl[n].data, "\xff\xff\xff", 3) != 0) ctrl_out(gspca_dev, 0x40, 3, 0x7a00, tbl[n].idx, 3, tbl[n].data); else msleep(tbl[n].idx); } } static int gl860_guess_sensor(struct gspca_dev *gspca_dev, u16 vendor_id, u16 product_id) { struct sd *sd = (struct sd *) gspca_dev; u8 probe, nb26, nb96, nOV, ntry; if (product_id == 0xf191) sd->sensor = ID_MI1320; if (sd->sensor == 0xff) { ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &probe); ctrl_in(gspca_dev, 0xc0, 2, 0x0000, 0x0004, 1, &probe); ctrl_out(gspca_dev, 0x40, 1, 0x0000, 0x0000, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0010, 0x0010, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0008, 0x00c0, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0001, 0x00c1, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0001, 0x00c2, 0, 
NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0020, 0x0006, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x006a, 0x000d, 0, NULL); msleep(56); PDEBUG(D_PROBE, "probing for sensor MI2020 or OVXXXX"); nOV = 0; for (ntry = 0; ntry < 4; ntry++) { ctrl_out(gspca_dev, 0x40, 1, 0x0040, 0x0000, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x0063, 0x0006, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x7a00, 0x8030, 0, NULL); msleep(10); ctrl_in(gspca_dev, 0xc0, 2, 0x7a00, 0x8030, 1, &probe); PDEBUG(D_PROBE, "probe=0x%02x", probe); if (probe == 0xff) nOV++; } if (nOV) { PDEBUG(D_PROBE, "0xff -> OVXXXX"); PDEBUG(D_PROBE, "probing for sensor OV2640 or OV9655"); nb26 = nb96 = 0; for (ntry = 0; ntry < 4; ntry++) { ctrl_out(gspca_dev, 0x40, 1, 0x0040, 0x0000, 0, NULL); msleep(3); ctrl_out(gspca_dev, 0x40, 1, 0x6000, 0x800a, 0, NULL); msleep(10); /* Wait for 26(OV2640) or 96(OV9655) */ ctrl_in(gspca_dev, 0xc0, 2, 0x6000, 0x800a, 1, &probe); if (probe == 0x26 || probe == 0x40) { PDEBUG(D_PROBE, "probe=0x%02x -> OV2640", probe); sd->sensor = ID_OV2640; nb26 += 4; break; } if (probe == 0x96 || probe == 0x55) { PDEBUG(D_PROBE, "probe=0x%02x -> OV9655", probe); sd->sensor = ID_OV9655; nb96 += 4; break; } PDEBUG(D_PROBE, "probe=0x%02x", probe); if (probe == 0x00) nb26++; if (probe == 0xff) nb96++; msleep(3); } if (nb26 < 4 && nb96 < 4) return -1; } else { PDEBUG(D_PROBE, "Not any 0xff -> MI2020"); sd->sensor = ID_MI2020; } } if (_MI1320_) { PDEBUG(D_PROBE, "05e3:f191 sensor MI1320 (1.3M)"); } else if (_MI2020_) { PDEBUG(D_PROBE, "05e3:0503 sensor MI2020 (2.0M)"); } else if (_OV9655_) { PDEBUG(D_PROBE, "05e3:0503 sensor OV9655 (1.3M)"); } else if (_OV2640_) { PDEBUG(D_PROBE, "05e3:0503 sensor OV2640 (2.0M)"); } else { PDEBUG(D_PROBE, "***** Unknown sensor *****"); return -1; } return 0; }
gpl-2.0
marc1706/hd2_kernel
arch/sparc/kernel/sun4m_irq.c
2780
14158
/* * sun4m irq support * * djhr: Hacked out of irq.c into a CPU dependent version. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com) * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) */ #include <asm/timer.h> #include <asm/traps.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/cacheflush.h> #include "irq.h" #include "kernel.h" /* Sample sun4m IRQ layout: * * 0x22 - Power * 0x24 - ESP SCSI * 0x26 - Lance ethernet * 0x2b - Floppy * 0x2c - Zilog uart * 0x32 - SBUS level 0 * 0x33 - Parallel port, SBUS level 1 * 0x35 - SBUS level 2 * 0x37 - SBUS level 3 * 0x39 - Audio, Graphics card, SBUS level 4 * 0x3b - SBUS level 5 * 0x3d - SBUS level 6 * * Each interrupt source has a mask bit in the interrupt registers. * When the mask bit is set, this blocks interrupt deliver. So you * clear the bit to enable the interrupt. * * Interrupts numbered less than 0x10 are software triggered interrupts * and unused by Linux. * * Interrupt level assignment on sun4m: * * level source * ------------------------------------------------------------ * 1 softint-1 * 2 softint-2, VME/SBUS level 1 * 3 softint-3, VME/SBUS level 2 * 4 softint-4, onboard SCSI * 5 softint-5, VME/SBUS level 3 * 6 softint-6, onboard ETHERNET * 7 softint-7, VME/SBUS level 4 * 8 softint-8, onboard VIDEO * 9 softint-9, VME/SBUS level 5, Module Interrupt * 10 softint-10, system counter/timer * 11 softint-11, VME/SBUS level 6, Floppy * 12 softint-12, Keyboard/Mouse, Serial * 13 softint-13, VME/SBUS level 7, ISDN Audio * 14 softint-14, per-processor counter/timer * 15 softint-15, Asynchronous Errors (broadcast) * * Each interrupt source is masked distinctly in the sun4m interrupt * registers. The PIL level alone is therefore ambiguous, since multiple * interrupt sources map to a single PIL. 
* * This ambiguity is resolved in the 'intr' property for device nodes * in the OF device tree. Each 'intr' property entry is composed of * two 32-bit words. The first word is the IRQ priority value, which * is what we're intersted in. The second word is the IRQ vector, which * is unused. * * The low 4 bits of the IRQ priority indicate the PIL, and the upper * 4 bits indicate onboard vs. SBUS leveled vs. VME leveled. 0x20 * means onboard, 0x30 means SBUS leveled, and 0x40 means VME leveled. * * For example, an 'intr' IRQ priority value of 0x24 is onboard SCSI * whereas a value of 0x33 is SBUS level 2. Here are some sample * 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and * Tadpole S3 GX systems. * * esp: 0x24 onboard ESP SCSI * le: 0x26 onboard Lance ETHERNET * p9100: 0x32 SBUS level 1 P9100 video * bpp: 0x33 SBUS level 2 BPP parallel port device * DBRI: 0x39 SBUS level 5 DBRI ISDN audio * SUNW,leo: 0x39 SBUS level 5 LEO video * pcmcia: 0x3b SBUS level 6 PCMCIA controller * uctrl: 0x3b SBUS level 6 UCTRL device * modem: 0x3d SBUS level 7 MODEM * zs: 0x2c onboard keyboard/mouse/serial * floppy: 0x2b onboard Floppy * power: 0x22 onboard power device (XXX unknown mask bit XXX) */ /* Code in entry.S needs to get at these register mappings. */ struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS]; struct sun4m_irq_global __iomem *sun4m_irq_global; struct sun4m_handler_data { bool percpu; long mask; }; /* Dave Redman (djhr@tadpole.co.uk) * The sun4m interrupt registers. 
*/ #define SUN4M_INT_ENABLE 0x80000000 #define SUN4M_INT_E14 0x00000080 #define SUN4M_INT_E10 0x00080000 #define SUN4M_HARD_INT(x) (0x000000001 << (x)) #define SUN4M_SOFT_INT(x) (0x000010000 << (x)) #define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */ #define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */ #define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */ #define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */ #define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */ #define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */ #define SUN4M_INT_MODULE 0x00200000 /* module interrupt */ #define SUN4M_INT_VIDEO 0x00100000 /* onboard video */ #define SUN4M_INT_REALTIME 0x00080000 /* system timer */ #define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */ #define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */ #define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */ #define SUN4M_INT_SERIAL 0x00008000 /* serial ports */ #define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */ #define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */ #define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */ #define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \ SUN4M_INT_M2S_WRITE_ERR | \ SUN4M_INT_ECC_ERR | \ SUN4M_INT_VME_ERR) #define SUN4M_INT_SBUS(x) (1 << (x+7)) #define SUN4M_INT_VME(x) (1 << (x)) /* Interrupt levels used by OBP */ #define OBP_INT_LEVEL_SOFT 0x10 #define OBP_INT_LEVEL_ONBOARD 0x20 #define OBP_INT_LEVEL_SBUS 0x30 #define OBP_INT_LEVEL_VME 0x40 #define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10) #define SUN4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14) static unsigned long sun4m_imask[0x50] = { /* 0x00 - SMP */ 0, SUN4M_SOFT_INT(1), SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), SUN4M_SOFT_INT(6), SUN4M_SOFT_INT(7), SUN4M_SOFT_INT(8), SUN4M_SOFT_INT(9), SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), /* 0x10 - soft */ 0, SUN4M_SOFT_INT(1), 
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), SUN4M_SOFT_INT(6), SUN4M_SOFT_INT(7), SUN4M_SOFT_INT(8), SUN4M_SOFT_INT(9), SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), /* 0x20 - onboard */ 0, 0, 0, 0, SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0, SUN4M_INT_VIDEO, SUN4M_INT_MODULE, SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY, (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS), SUN4M_INT_AUDIO, SUN4M_INT_E14, SUN4M_INT_MODULE_ERR, /* 0x30 - sbus */ 0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1), 0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3), 0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5), 0, SUN4M_INT_SBUS(6), 0, 0, /* 0x40 - vme */ 0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1), 0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3), 0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5), 0, SUN4M_INT_VME(6), 0, 0 }; static void sun4m_mask_irq(struct irq_data *data) { struct sun4m_handler_data *handler_data = data->handler_data; int cpu = smp_processor_id(); if (handler_data->mask) { unsigned long flags; local_irq_save(flags); if (handler_data->percpu) { sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set); } else { sbus_writel(handler_data->mask, &sun4m_irq_global->mask_set); } local_irq_restore(flags); } } static void sun4m_unmask_irq(struct irq_data *data) { struct sun4m_handler_data *handler_data = data->handler_data; int cpu = smp_processor_id(); if (handler_data->mask) { unsigned long flags; local_irq_save(flags); if (handler_data->percpu) { sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear); } else { sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear); } local_irq_restore(flags); } } static unsigned int sun4m_startup_irq(struct irq_data *data) { irq_link(data->irq); sun4m_unmask_irq(data); return 0; } static void sun4m_shutdown_irq(struct irq_data *data) { sun4m_mask_irq(data); irq_unlink(data->irq); } static struct irq_chip sun4m_irq = { .name = "sun4m", .irq_startup = sun4m_startup_irq, 
.irq_shutdown = sun4m_shutdown_irq, .irq_mask = sun4m_mask_irq, .irq_unmask = sun4m_unmask_irq, }; static unsigned int sun4m_build_device_irq(struct platform_device *op, unsigned int real_irq) { struct sun4m_handler_data *handler_data; unsigned int irq; unsigned int pil; if (real_irq >= OBP_INT_LEVEL_VME) { prom_printf("Bogus sun4m IRQ %u\n", real_irq); prom_halt(); } pil = (real_irq & 0xf); irq = irq_alloc(real_irq, pil); if (irq == 0) goto out; handler_data = irq_get_handler_data(irq); if (unlikely(handler_data)) goto out; handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC); if (unlikely(!handler_data)) { prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n"); prom_halt(); } handler_data->mask = sun4m_imask[real_irq]; handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD; irq_set_chip_and_handler_name(irq, &sun4m_irq, handle_level_irq, "level"); irq_set_handler_data(irq, handler_data); out: return irq; } #ifdef CONFIG_SMP static void sun4m_send_ipi(int cpu, int level) { sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set); } static void sun4m_clear_ipi(int cpu, int level) { sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->clear); } static void sun4m_set_udt(int cpu) { sbus_writel(cpu, &sun4m_irq_global->interrupt_target); } #endif struct sun4m_timer_percpu { u32 l14_limit; u32 l14_count; u32 l14_limit_noclear; u32 user_timer_start_stop; }; static struct sun4m_timer_percpu __iomem *timers_percpu[SUN4M_NCPUS]; struct sun4m_timer_global { u32 l10_limit; u32 l10_count; u32 l10_limit_noclear; u32 reserved; u32 timer_config; }; static struct sun4m_timer_global __iomem *timers_global; unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10); static void sun4m_clear_clock_irq(void) { sbus_readl(&timers_global->l10_limit); } void sun4m_nmi(struct pt_regs *regs) { unsigned long afsr, afar, si; printk(KERN_ERR "Aieee: sun4m NMI received!\n"); /* XXX HyperSparc hack XXX */ __asm__ __volatile__("mov 0x500, %%g1\n\t" "lda [%%g1] 
0x4, %0\n\t" "mov 0x600, %%g1\n\t" "lda [%%g1] 0x4, %1\n\t" : "=r" (afsr), "=r" (afar)); printk(KERN_ERR "afsr=%08lx afar=%08lx\n", afsr, afar); si = sbus_readl(&sun4m_irq_global->pending); printk(KERN_ERR "si=%08lx\n", si); if (si & SUN4M_INT_MODULE_ERR) printk(KERN_ERR "Module async error\n"); if (si & SUN4M_INT_M2S_WRITE_ERR) printk(KERN_ERR "MBus/SBus async error\n"); if (si & SUN4M_INT_ECC_ERR) printk(KERN_ERR "ECC memory error\n"); if (si & SUN4M_INT_VME_ERR) printk(KERN_ERR "VME async error\n"); printk(KERN_ERR "you lose buddy boy...\n"); show_regs(regs); prom_halt(); } void sun4m_unmask_profile_irq(void) { unsigned long flags; local_irq_save(flags); sbus_writel(sun4m_imask[SUN4M_PROFILE_IRQ], &sun4m_irq_global->mask_clear); local_irq_restore(flags); } void sun4m_clear_profile_irq(int cpu) { sbus_readl(&timers_percpu[cpu]->l14_limit); } static void sun4m_load_profile_irq(int cpu, unsigned int limit) { sbus_writel(limit, &timers_percpu[cpu]->l14_limit); } static void __init sun4m_init_timers(irq_handler_t counter_fn) { struct device_node *dp = of_find_node_by_name(NULL, "counter"); int i, err, len, num_cpu_timers; unsigned int irq; const u32 *addr; if (!dp) { printk(KERN_ERR "sun4m_init_timers: No 'counter' node.\n"); return; } addr = of_get_property(dp, "address", &len); of_node_put(dp); if (!addr) { printk(KERN_ERR "sun4m_init_timers: No 'address' prop.\n"); return; } num_cpu_timers = (len / sizeof(u32)) - 1; for (i = 0; i < num_cpu_timers; i++) { timers_percpu[i] = (void __iomem *) (unsigned long) addr[i]; } timers_global = (void __iomem *) (unsigned long) addr[num_cpu_timers]; sbus_writel((((1000000/HZ) + 1) << 10), &timers_global->l10_limit); master_l10_counter = &timers_global->l10_count; irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ); err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL); if (err) { printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n", err); return; } for (i = 0; i < num_cpu_timers; i++) sbus_writel(0, 
&timers_percpu[i]->l14_limit); if (num_cpu_timers == 4) sbus_writel(SUN4M_INT_E14, &sun4m_irq_global->mask_set); #ifdef CONFIG_SMP { unsigned long flags; struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; /* For SMP we use the level 14 ticker, however the bootup code * has copied the firmware's level 14 vector into the boot cpu's * trap table, we must fix this now or we get squashed. */ local_irq_save(flags); trap_table->inst_one = lvl14_save[0]; trap_table->inst_two = lvl14_save[1]; trap_table->inst_three = lvl14_save[2]; trap_table->inst_four = lvl14_save[3]; local_flush_cache_all(); local_irq_restore(flags); } #endif } void __init sun4m_init_IRQ(void) { struct device_node *dp = of_find_node_by_name(NULL, "interrupt"); int len, i, mid, num_cpu_iregs; const u32 *addr; if (!dp) { printk(KERN_ERR "sun4m_init_IRQ: No 'interrupt' node.\n"); return; } addr = of_get_property(dp, "address", &len); of_node_put(dp); if (!addr) { printk(KERN_ERR "sun4m_init_IRQ: No 'address' prop.\n"); return; } num_cpu_iregs = (len / sizeof(u32)) - 1; for (i = 0; i < num_cpu_iregs; i++) { sun4m_irq_percpu[i] = (void __iomem *) (unsigned long) addr[i]; } sun4m_irq_global = (void __iomem *) (unsigned long) addr[num_cpu_iregs]; local_irq_disable(); sbus_writel(~SUN4M_INT_MASKALL, &sun4m_irq_global->mask_set); for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++) sbus_writel(~0x17fff, &sun4m_irq_percpu[mid]->clear); if (num_cpu_iregs == 4) sbus_writel(0, &sun4m_irq_global->interrupt_target); BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM); sparc_irq_config.init_timers = sun4m_init_timers; sparc_irq_config.build_device_irq = sun4m_build_device_irq; #ifdef CONFIG_SMP BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM); #endif /* Cannot 
enable interrupts until OBP ticker is disabled. */ }
gpl-2.0
bedalus/hxore
drivers/scsi/lpfc/lpfc_vport.c
2780
25287
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_version.h" #include "lpfc_vport.h" inline void lpfc_vport_set_state(struct lpfc_vport *vport, enum fc_vport_state new_state) { struct fc_vport *fc_vport = vport->fc_vport; if (fc_vport) { /* * When the transport defines fc_vport_set state we will replace * this code with the following line */ /* fc_vport_set_state(fc_vport, new_state); */ if (new_state != FC_VPORT_INITIALIZING) fc_vport->vport_last_state = fc_vport->vport_state; fc_vport->vport_state = new_state; } /* for all the error states we will set the invternal state to FAILED */ switch (new_state) { case FC_VPORT_NO_FABRIC_SUPP: case FC_VPORT_NO_FABRIC_RSCS: case FC_VPORT_FABRIC_LOGOUT: case FC_VPORT_FABRIC_REJ_WWN: case FC_VPORT_FAILED: vport->port_state = LPFC_VPORT_FAILED; break; case FC_VPORT_LINKDOWN: vport->port_state = LPFC_VPORT_UNKNOWN; break; default: /* do nothing */ break; } } static int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; spin_lock_irq(&phba->hbalock); /* Start at bit 1 because vpi zero is reserved for the physical port */ vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1); if (vpi > phba->max_vpi) vpi = 0; else set_bit(vpi, phba->vpi_bmask); if (phba->sli_rev == LPFC_SLI_REV4) phba->sli4_hba.max_cfg_param.vpi_used++; spin_unlock_irq(&phba->hbalock); return vpi; } static void lpfc_free_vpi(struct lpfc_hba 
*phba, int vpi) { if (vpi == 0) return; spin_lock_irq(&phba->hbalock); clear_bit(vpi, phba->vpi_bmask); if (phba->sli_rev == LPFC_SLI_REV4) phba->sli4_hba.max_cfg_param.vpi_used--; spin_unlock_irq(&phba->hbalock); } static int lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) { LPFC_MBOXQ_t *pmb; MAILBOX_t *mb; struct lpfc_dmabuf *mp; int rc; pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) { return -ENOMEM; } mb = &pmb->u.mb; rc = lpfc_read_sparam(phba, pmb, vport->vpi); if (rc) { mempool_free(pmb, phba->mbox_mem_pool); return -ENOMEM; } /* * Grab buffer pointer and clear context1 so we can use * lpfc_sli_issue_box_wait */ mp = (struct lpfc_dmabuf *) pmb->context1; pmb->context1 = NULL; pmb->vport = vport; rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2); if (rc != MBX_SUCCESS) { if (signal_pending(current)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, "1830 Signal aborted mbxCmd x%x\n", mb->mbxCommand); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); if (rc != MBX_TIMEOUT) mempool_free(pmb, phba->mbox_mem_pool); return -EINTR; } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT, "1818 VPort failed init, mbxCmd x%x " "READ_SPARM mbxStatus x%x, rc = x%x\n", mb->mbxCommand, mb->mbxStatus, rc); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); if (rc != MBX_TIMEOUT) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm)); memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, sizeof (struct lpfc_name)); memcpy(&vport->fc_portname, &vport->fc_sparam.portName, sizeof (struct lpfc_name)); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); return 0; } static int lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn, const char *name_type) { /* ensure that IEEE format 1 addresses * contain zeros in bits 59-48 */ if (!((wwn->u.wwn[0] >> 4) == 1 && ((wwn->u.wwn[0] & 0xf) != 0 || 
(wwn->u.wwn[1] & 0xf) != 0))) return 1; lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "1822 Invalid %s: %02x:%02x:%02x:%02x:" "%02x:%02x:%02x:%02x\n", name_type, wwn->u.wwn[0], wwn->u.wwn[1], wwn->u.wwn[2], wwn->u.wwn[3], wwn->u.wwn[4], wwn->u.wwn[5], wwn->u.wwn[6], wwn->u.wwn[7]); return 0; } static int lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport) { struct lpfc_vport *vport; unsigned long flags; spin_lock_irqsave(&phba->hbalock, flags); list_for_each_entry(vport, &phba->port_list, listentry) { if (vport == new_vport) continue; /* If they match, return not unique */ if (memcmp(&vport->fc_sparam.portName, &new_vport->fc_sparam.portName, sizeof(struct lpfc_name)) == 0) { spin_unlock_irqrestore(&phba->hbalock, flags); return 0; } } spin_unlock_irqrestore(&phba->hbalock, flags); return 1; } /** * lpfc_discovery_wait - Wait for driver discovery to quiesce * @vport: The virtual port for which this call is being executed. * * This driver calls this routine specifically from lpfc_vport_delete * to enforce a synchronous execution of vport * delete relative to discovery activities. The * lpfc_vport_delete routine should not return until it * can reasonably guarantee that discovery has quiesced. * Post FDISC LOGO, the driver must wait until its SAN teardown is * complete and all resources recovered before allowing * cleanup. * * This routine does not require any locks held. **/ static void lpfc_discovery_wait(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; uint32_t wait_flags = 0; unsigned long wait_time_max; unsigned long start_time; wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE | FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO; /* * The time constraint on this loop is a balance between the * fabric RA_TOV value and dev_loss tmo. The driver's * devloss_tmo is 10 giving this loop a 3x multiplier minimally. 
*/ wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000); wait_time_max += jiffies; start_time = jiffies; while (time_before(jiffies, wait_time_max)) { if ((vport->num_disc_nodes > 0) || (vport->fc_flag & wait_flags) || ((vport->port_state > LPFC_VPORT_FAILED) && (vport->port_state < LPFC_VPORT_READY))) { lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1833 Vport discovery quiesce Wait:" " state x%x fc_flags x%x" " num_nodes x%x, waiting 1000 msecs" " total wait msecs x%x\n", vport->port_state, vport->fc_flag, vport->num_disc_nodes, jiffies_to_msecs(jiffies - start_time)); msleep(1000); } else { /* Base case. Wait variants satisfied. Break out */ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1834 Vport discovery quiesced:" " state x%x fc_flags x%x" " wait msecs x%x\n", vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); break; } } if (time_after(jiffies, wait_time_max)) lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1835 Vport discovery quiesce failed:" " state x%x fc_flags x%x wait msecs x%x\n", vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); } int lpfc_vport_create(struct fc_vport *fc_vport, bool disable) { struct lpfc_nodelist *ndlp; struct Scsi_Host *shost = fc_vport->shost; struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = pport->phba; struct lpfc_vport *vport = NULL; int instance; int vpi; int rc = VPORT_ERROR; int status; if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "1808 Create VPORT failed: " "NPIV is not enabled: SLImode:%d\n", phba->sli_rev); rc = VPORT_INVAL; goto error_out; } vpi = lpfc_alloc_vpi(phba); if (vpi == 0) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "1809 Create VPORT failed: " "Max VPORTs (%d) exceeded\n", phba->max_vpi); rc = VPORT_NORESOURCES; goto error_out; } /* Assign an unused board number */ if ((instance = lpfc_get_instance()) < 0) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, 
"1810 Create VPORT failed: Cannot get " "instance number\n"); lpfc_free_vpi(phba, vpi); rc = VPORT_NORESOURCES; goto error_out; } vport = lpfc_create_port(phba, instance, &fc_vport->dev); if (!vport) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "1811 Create VPORT failed: vpi x%x\n", vpi); lpfc_free_vpi(phba, vpi); rc = VPORT_NORESOURCES; goto error_out; } vport->vpi = vpi; lpfc_debugfs_initialize(vport); if ((status = lpfc_vport_sparm(phba, vport))) { if (status == -EINTR) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1831 Create VPORT Interrupted.\n"); rc = VPORT_ERROR; } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1813 Create VPORT failed. " "Cannot get sparam\n"); rc = VPORT_NORESOURCES; } lpfc_free_vpi(phba, vpi); destroy_port(vport); goto error_out; } u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn); u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn); memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8); memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8); if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") || !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1821 Create VPORT failed. " "Invalid WWN format\n"); lpfc_free_vpi(phba, vpi); destroy_port(vport); rc = VPORT_INVAL; goto error_out; } if (!lpfc_unique_wwpn(phba, vport)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1823 Create VPORT failed. " "Duplicate WWN on HBA\n"); lpfc_free_vpi(phba, vpi); destroy_port(vport); rc = VPORT_INVAL; goto error_out; } /* Create binary sysfs attribute for vport */ lpfc_alloc_sysfs_attr(vport); *(struct lpfc_vport **)fc_vport->dd_data = vport; vport->fc_vport = fc_vport; /* * In SLI4, the vpi must be activated before it can be used * by the port. 
*/ if ((phba->sli_rev == LPFC_SLI_REV4) && (pport->fc_flag & FC_VFI_REGISTERED)) { rc = lpfc_sli4_init_vpi(vport); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "1838 Failed to INIT_VPI on vpi %d " "status %d\n", vpi, rc); rc = VPORT_NORESOURCES; lpfc_free_vpi(phba, vpi); goto error_out; } } else if (phba->sli_rev == LPFC_SLI_REV4) { /* * Driver cannot INIT_VPI now. Set the flags to * init_vpi when reg_vfi complete. */ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); rc = VPORT_OK; goto out; } if ((phba->link_state < LPFC_LINK_UP) || (pport->port_state < LPFC_FABRIC_CFG_LINK) || (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); rc = VPORT_OK; goto out; } if (disable) { lpfc_vport_set_state(vport, FC_VPORT_DISABLED); rc = VPORT_OK; goto out; } /* Use the Physical nodes Fabric NDLP to determine if the link is * up and ready to FDISC. */ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (ndlp && NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { lpfc_set_disctmo(vport); lpfc_initial_fdisc(vport); } else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0262 No NPIV Fabric support\n"); } } else { lpfc_vport_set_state(vport, FC_VPORT_FAILED); } rc = VPORT_OK; out: lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1825 Vport Created.\n"); lpfc_host_attrib_init(lpfc_shost_from_vport(vport)); error_out: return rc; } static int disable_vport(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; long timeout; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ndlp = lpfc_findnode_did(vport, Fabric_DID); if (ndlp && NLP_CHK_NODE_ACT(ndlp) && phba->link_state >= LPFC_LINK_UP) { vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = 
msecs_to_jiffies(phba->fc_ratov * 2000); if (!lpfc_issue_els_npiv_logo(vport, ndlp)) while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) timeout = schedule_timeout(timeout); } lpfc_sli_host_down(vport); /* Mark all nodes for discovery so we can remove them by * calling lpfc_cleanup_rpis(vport, 1) */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); } lpfc_cleanup_rpis(vport, 1); lpfc_stop_vport_timers(vport); lpfc_unreg_all_rpis(vport); lpfc_unreg_default_rpis(vport); /* * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the * scsi_host_put() to release the vport. */ lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); lpfc_vport_set_state(vport, FC_VPORT_DISABLED); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1826 Vport Disabled.\n"); return VPORT_OK; } static int enable_vport(struct fc_vport *fc_vport) { struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if ((phba->link_state < LPFC_LINK_UP) || (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); return VPORT_OK; } spin_lock_irq(shost->host_lock); vport->load_flag |= FC_LOADING; vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); /* Use the Physical nodes Fabric NDLP to determine if the link is * up and ready to FDISC. 
*/ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (ndlp && NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { lpfc_set_disctmo(vport); lpfc_initial_fdisc(vport); } else { lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "0264 No NPIV Fabric support\n"); } } else { lpfc_vport_set_state(vport, FC_VPORT_FAILED); } lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1827 Vport Enabled.\n"); return VPORT_OK; } int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable) { if (disable) return disable_vport(fc_vport); else return enable_vport(fc_vport); } int lpfc_vport_delete(struct fc_vport *fc_vport) { struct lpfc_nodelist *ndlp = NULL; struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost; struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; long timeout; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1812 vport_delete failed: Cannot delete " "physical host\n"); return VPORT_ERROR; } /* If the vport is a static vport fail the deletion. */ if ((vport->vport_flag & STATIC_VPORT) && !(phba->pport->load_flag & FC_UNLOADING)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1837 vport_delete failed: Cannot delete " "static vport.\n"); return VPORT_ERROR; } spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); /* * If we are not unloading the driver then prevent the vport_delete * from happening until after this vport's discovery is finished. */ if (!(phba->pport->load_flag & FC_UNLOADING)) { int check_count = 0; while (check_count < ((phba->fc_ratov * 3) + 3) && vport->port_state > LPFC_VPORT_FAILED && vport->port_state < LPFC_VPORT_READY) { check_count++; msleep(1000); } if (vport->port_state > LPFC_VPORT_FAILED && vport->port_state < LPFC_VPORT_READY) return -EAGAIN; } /* * This is a bit of a mess. 
We want to ensure the shost doesn't get * torn down until we're done with the embedded lpfc_vport structure. * * Beyond holding a reference for this function, we also need a * reference for outstanding I/O requests we schedule during delete * processing. But once we scsi_remove_host() we can no longer obtain * a reference through scsi_host_get(). * * So we take two references here. We release one reference at the * bottom of the function -- after delinking the vport. And we * release the other at the completion of the unreg_vpi that get's * initiated after we've disposed of all other resources associated * with the port. */ if (!scsi_host_get(shost)) return VPORT_INVAL; if (!scsi_host_get(shost)) { scsi_host_put(shost); return VPORT_INVAL; } lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); /* Remove FC host and then SCSI host with the vport */ fc_remove_host(lpfc_shost_from_vport(vport)); scsi_remove_host(lpfc_shost_from_vport(vport)); ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); /* In case of driver unload, we shall not perform fabric logo as the * worker thread already stopped at this stage and, in this case, we * can safely skip the fabric logo. 
*/ if (phba->pport->load_flag & FC_UNLOADING) { if (ndlp && NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && phba->link_state >= LPFC_LINK_UP) { /* First look for the Fabric ndlp */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) goto skip_logo; else if (!NLP_CHK_NODE_ACT(ndlp)) { ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); if (!ndlp) goto skip_logo; } /* Remove ndlp from vport npld list */ lpfc_dequeue_node(vport, ndlp); /* Indicate free memory when release */ spin_lock_irq(&phba->ndlp_lock); NLP_SET_FREE_REQ(ndlp); spin_unlock_irq(&phba->ndlp_lock); /* Kick off release ndlp when it can be safely done */ lpfc_nlp_put(ndlp); } goto skip_logo; } /* Otherwise, we will perform fabric logo as needed */ if (ndlp && NLP_CHK_NODE_ACT(ndlp) && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && phba->link_state >= LPFC_LINK_UP && phba->fc_topology != LPFC_TOPOLOGY_LOOP) { if (vport->cfg_enable_da_id) { timeout = msecs_to_jiffies(phba->fc_ratov * 2000); if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) while (vport->ct_flags && timeout) timeout = schedule_timeout(timeout); else lpfc_printf_log(vport->phba, KERN_WARNING, LOG_VPORT, "1829 CT command failed to " "delete objects on fabric\n"); } /* First look for the Fabric ndlp */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) { /* Cannot find existing Fabric ndlp, allocate one */ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); if (!ndlp) goto skip_logo; lpfc_nlp_init(vport, ndlp, Fabric_DID); /* Indicate free memory when release */ NLP_SET_FREE_REQ(ndlp); } else { if (!NLP_CHK_NODE_ACT(ndlp)) ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); if (!ndlp) goto skip_logo; /* Remove ndlp from vport npld list */ lpfc_dequeue_node(vport, ndlp); spin_lock_irq(&phba->ndlp_lock); if (!NLP_CHK_FREE_REQ(ndlp)) /* Indicate free memory when release */ NLP_SET_FREE_REQ(ndlp); else { /* Skip this if ndlp is already in free mode */ spin_unlock_irq(&phba->ndlp_lock); goto skip_logo; } 
spin_unlock_irq(&phba->ndlp_lock); } if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) goto skip_logo; vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); if (!lpfc_issue_els_npiv_logo(vport, ndlp)) while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout) timeout = schedule_timeout(timeout); } if (!(phba->pport->load_flag & FC_UNLOADING)) lpfc_discovery_wait(vport); skip_logo: lpfc_cleanup(vport); lpfc_sli_host_down(vport); lpfc_stop_vport_timers(vport); if (!(phba->pport->load_flag & FC_UNLOADING)) { lpfc_unreg_all_rpis(vport); lpfc_unreg_default_rpis(vport); /* * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) * does the scsi_host_put() to release the vport. */ if (lpfc_mbx_unreg_vpi(vport)) scsi_host_put(shost); } else scsi_host_put(shost); lpfc_free_vpi(phba, vport->vpi); vport->work_port_events = 0; spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1828 Vport Deleted.\n"); scsi_host_put(shost); return VPORT_OK; } struct lpfc_vport ** lpfc_create_vport_work_array(struct lpfc_hba *phba) { struct lpfc_vport *port_iterator; struct lpfc_vport **vports; int index = 0; vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *), GFP_KERNEL); if (vports == NULL) return NULL; spin_lock_irq(&phba->hbalock); list_for_each_entry(port_iterator, &phba->port_list, listentry) { if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) { if (!(port_iterator->load_flag & FC_UNLOADING)) lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT, "1801 Create vport work array FAILED: " "cannot do scsi_host_get\n"); continue; } vports[index++] = port_iterator; } spin_unlock_irq(&phba->hbalock); return vports; } void lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) { int i; if (vports == NULL) return; for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) scsi_host_put(lpfc_shost_from_vport(vports[i])); 
kfree(vports); } /** * lpfc_vport_reset_stat_data - Reset the statistical data for the vport * @vport: Pointer to vport object. * * This function resets the statistical data for the vport. This function * is called with the host_lock held **/ void lpfc_vport_reset_stat_data(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; if (ndlp->lat_data) memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT * sizeof(struct lpfc_scsicmd_bkt)); } } /** * lpfc_alloc_bucket - Allocate data buffer required for statistical data * @vport: Pointer to vport object. * * This function allocates data buffer required for all the FC * nodes of the vport to collect statistical data. **/ void lpfc_alloc_bucket(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; kfree(ndlp->lat_data); ndlp->lat_data = NULL; if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT, sizeof(struct lpfc_scsicmd_bkt), GFP_ATOMIC); if (!ndlp->lat_data) lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0287 lpfc_alloc_bucket failed to " "allocate statistical data buffer DID " "0x%x\n", ndlp->nlp_DID); } } } /** * lpfc_free_bucket - Free data buffer required for statistical data * @vport: Pointer to vport object. * * Th function frees statistical data buffer of all the FC * nodes of the vport. **/ void lpfc_free_bucket(struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (!NLP_CHK_NODE_ACT(ndlp)) continue; kfree(ndlp->lat_data); ndlp->lat_data = NULL; } }
gpl-2.0
coolshou/htc_m7u_kernel-3.4.10
drivers/infiniband/core/netlink.c
3292
4989
/* * Copyright (c) 2010 Voltaire Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ #include <linux/export.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/sock.h> #include <rdma/rdma_netlink.h> struct ibnl_client { struct list_head list; int index; int nops; const struct ibnl_client_cbs *cb_table; }; static DEFINE_MUTEX(ibnl_mutex); static struct sock *nls; static LIST_HEAD(client_list); int ibnl_add_client(int index, int nops, const struct ibnl_client_cbs cb_table[]) { struct ibnl_client *cur; struct ibnl_client *nl_client; nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL); if (!nl_client) return -ENOMEM; nl_client->index = index; nl_client->nops = nops; nl_client->cb_table = cb_table; mutex_lock(&ibnl_mutex); list_for_each_entry(cur, &client_list, list) { if (cur->index == index) { pr_warn("Client for %d already exists\n", index); mutex_unlock(&ibnl_mutex); kfree(nl_client); return -EINVAL; } } list_add_tail(&nl_client->list, &client_list); mutex_unlock(&ibnl_mutex); return 0; } EXPORT_SYMBOL(ibnl_add_client); int ibnl_remove_client(int index) { struct ibnl_client *cur, *next; mutex_lock(&ibnl_mutex); list_for_each_entry_safe(cur, next, &client_list, list) { if (cur->index == index) { list_del(&(cur->list)); mutex_unlock(&ibnl_mutex); kfree(cur); return 0; } } pr_warn("Can't remove callback for client idx %d. 
Not found\n", index); mutex_unlock(&ibnl_mutex); return -EINVAL; } EXPORT_SYMBOL(ibnl_remove_client); void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, int len, int client, int op) { unsigned char *prev_tail; prev_tail = skb_tail_pointer(skb); *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, NLM_F_MULTI); (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; return NLMSG_DATA(*nlh); nlmsg_failure: nlmsg_trim(skb, prev_tail); return NULL; } EXPORT_SYMBOL(ibnl_put_msg); int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, int len, void *data, int type) { unsigned char *prev_tail; prev_tail = skb_tail_pointer(skb); NLA_PUT(skb, type, len, data); nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; return 0; nla_put_failure: nlmsg_trim(skb, prev_tail - nlh->nlmsg_len); return -EMSGSIZE; } EXPORT_SYMBOL(ibnl_put_attr); static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { struct ibnl_client *client; int type = nlh->nlmsg_type; int index = RDMA_NL_GET_CLIENT(type); int op = RDMA_NL_GET_OP(type); list_for_each_entry(client, &client_list, list) { if (client->index == index) { if (op < 0 || op >= client->nops || !client->cb_table[RDMA_NL_GET_OP(op)].dump) return -EINVAL; { struct netlink_dump_control c = { .dump = client->cb_table[op].dump, }; return netlink_dump_start(nls, skb, nlh, &c); } } } pr_info("Index %d wasn't found in client list\n", index); return -EINVAL; } static void ibnl_rcv(struct sk_buff *skb) { mutex_lock(&ibnl_mutex); netlink_rcv_skb(skb, &ibnl_rcv_msg); mutex_unlock(&ibnl_mutex); } int __init ibnl_init(void) { nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, NULL, THIS_MODULE); if (!nls) { pr_warn("Failed to create netlink socket\n"); return -ENOMEM; } return 0; } void ibnl_cleanup(void) { struct ibnl_client *cur, *next; mutex_lock(&ibnl_mutex); list_for_each_entry_safe(cur, next, &client_list, list) { list_del(&(cur->list)); kfree(cur); } mutex_unlock(&ibnl_mutex); 
netlink_kernel_release(nls); }
gpl-2.0
Fusion-Devices/android_kernel_lge_mako
arch/powerpc/platforms/52xx/mpc52xx_pic.c
4572
16971
/* * * Programmable Interrupt Controller functions for the Freescale MPC52xx. * * Copyright (C) 2008 Secret Lab Technologies Ltd. * Copyright (C) 2006 bplan GmbH * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com> * Copyright (C) 2003 Montavista Software, Inc * * Based on the code from the 2.4 kernel by * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * */ /* * This is the device driver for the MPC5200 interrupt controller. * * hardware overview * ----------------- * The MPC5200 interrupt controller groups the all interrupt sources into * three groups called 'critical', 'main', and 'peripheral'. The critical * group has 3 irqs, External IRQ0, slice timer 0 irq, and wake from deep * sleep. Main group include the other 3 external IRQs, slice timer 1, RTC, * gpios, and the general purpose timers. Peripheral group contains the * remaining irq sources from all of the on-chip peripherals (PSCs, Ethernet, * USB, DMA, etc). * * virqs * ----- * The Linux IRQ subsystem requires that each irq source be assigned a * system wide unique IRQ number starting at 1 (0 means no irq). Since * systems can have multiple interrupt controllers, the virtual IRQ (virq) * infrastructure lets each interrupt controller to define a local set * of IRQ numbers and the virq infrastructure maps those numbers into * a unique range of the global IRQ# space. * * To define a range of virq numbers for this controller, this driver first * assigns a number to each of the irq groups (called the level 1 or L1 * value). Within each group individual irq sources are also assigned a * number, as defined by the MPC5200 user guide, and refers to it as the * level 2 or L2 value. The virq number is determined by shifting up the * L1 value by MPC52xx_IRQ_L1_OFFSET and ORing it with the L2 value. 
* * For example, the TMR0 interrupt is irq 9 in the main group. The * virq for TMR0 is calculated by ((1 << MPC52xx_IRQ_L1_OFFSET) | 9). * * The observant reader will also notice that this driver defines a 4th * interrupt group called 'bestcomm'. The bestcomm group isn't physically * part of the MPC5200 interrupt controller, but it is used here to assign * a separate virq number for each bestcomm task (since any of the 16 * bestcomm tasks can cause the bestcomm interrupt to be raised). When a * bestcomm interrupt occurs (peripheral group, irq 0) this driver determines * which task needs servicing and returns the irq number for that task. This * allows drivers which use bestcomm to define their own interrupt handlers. * * irq_chip structures * ------------------- * For actually manipulating IRQs (masking, enabling, clearing, etc) this * driver defines four separate 'irq_chip' structures, one for the main * group, one for the peripherals group, one for the bestcomm group and one * for external interrupts. The irq_chip structures provide the hooks needed * to manipulate each IRQ source, and since each group is has a separate set * of registers for controlling the irq, it makes sense to divide up the * hooks along those lines. * * You'll notice that there is not an irq_chip for the critical group and * you'll also notice that there is an irq_chip defined for external * interrupts even though there is no external interrupt group. The reason * for this is that the four external interrupts are all managed with the same * register even though one of the external IRQs is in the critical group and * the other three are in the main group. For this reason it makes sense for * the 4 external irqs to be managed using a separate set of hooks. 
The * reason there is no crit irq_chip is that of the 3 irqs in the critical * group, only external interrupt is actually support at this time by this * driver and since external interrupt is the only one used, it can just * be directed to make use of the external irq irq_chip. * * device tree bindings * -------------------- * The device tree bindings for this controller reflect the two level * organization of irqs in the device. #interrupt-cells = <3> where the * first cell is the group number [0..3], the second cell is the irq * number in the group, and the third cell is the sense type (level/edge). * For reference, the following is a list of the interrupt property values * associated with external interrupt sources on the MPC5200 (just because * it is non-obvious to determine what the interrupts property should be * when reading the mpc5200 manual and it is a frequently asked question). * * External interrupts: * <0 0 n> external irq0, n is sense (n=0: level high, * <1 1 n> external irq1, n is sense n=1: edge rising, * <1 2 n> external irq2, n is sense n=2: edge falling, * <1 3 n> external irq3, n is sense n=3: level low) */ #undef DEBUG #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/of.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/mpc52xx.h> /* HW IRQ mapping */ #define MPC52xx_IRQ_L1_CRIT (0) #define MPC52xx_IRQ_L1_MAIN (1) #define MPC52xx_IRQ_L1_PERP (2) #define MPC52xx_IRQ_L1_SDMA (3) #define MPC52xx_IRQ_L1_OFFSET (6) #define MPC52xx_IRQ_L1_MASK (0x00c0) #define MPC52xx_IRQ_L2_MASK (0x003f) #define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0) /* MPC5200 device tree match tables */ static struct of_device_id mpc52xx_pic_ids[] __initdata = { { .compatible = "fsl,mpc5200-pic", }, { .compatible = "mpc5200-pic", }, {} }; static struct of_device_id mpc52xx_sdma_ids[] __initdata = { { .compatible = "fsl,mpc5200-bestcomm", }, { .compatible = "mpc5200-bestcomm", }, {} }; static struct mpc52xx_intr __iomem *intr; static struct mpc52xx_sdma 
__iomem *sdma; static struct irq_domain *mpc52xx_irqhost = NULL; static unsigned char mpc52xx_map_senses[4] = { IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_LOW, }; /* Utility functions */ static inline void io_be_setbit(u32 __iomem *addr, int bitno) { out_be32(addr, in_be32(addr) | (1 << bitno)); } static inline void io_be_clrbit(u32 __iomem *addr, int bitno) { out_be32(addr, in_be32(addr) & ~(1 << bitno)); } /* * IRQ[0-3] interrupt irq_chip */ static void mpc52xx_extirq_mask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->ctrl, 11 - l2irq); } static void mpc52xx_extirq_unmask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->ctrl, 11 - l2irq); } static void mpc52xx_extirq_ack(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->ctrl, 27-l2irq); } static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) { u32 ctrl_reg, type; int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; void *handler = handle_level_irq; pr_debug("%s: irq=%x. 
l2=%d flow_type=%d\n", __func__, (int) irqd_to_hwirq(d), l2irq, flow_type); switch (flow_type) { case IRQF_TRIGGER_HIGH: type = 0; break; case IRQF_TRIGGER_RISING: type = 1; handler = handle_edge_irq; break; case IRQF_TRIGGER_FALLING: type = 2; handler = handle_edge_irq; break; case IRQF_TRIGGER_LOW: type = 3; break; default: type = 0; } ctrl_reg = in_be32(&intr->ctrl); ctrl_reg &= ~(0x3 << (22 - (l2irq * 2))); ctrl_reg |= (type << (22 - (l2irq * 2))); out_be32(&intr->ctrl, ctrl_reg); __irq_set_handler_locked(d->irq, handler); return 0; } static struct irq_chip mpc52xx_extirq_irqchip = { .name = "MPC52xx External", .irq_mask = mpc52xx_extirq_mask, .irq_unmask = mpc52xx_extirq_unmask, .irq_ack = mpc52xx_extirq_ack, .irq_set_type = mpc52xx_extirq_set_type, }; /* * Main interrupt irq_chip */ static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type) { return 0; /* Do nothing so that the sense mask will get updated */ } static void mpc52xx_main_mask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->main_mask, 16 - l2irq); } static void mpc52xx_main_unmask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->main_mask, 16 - l2irq); } static struct irq_chip mpc52xx_main_irqchip = { .name = "MPC52xx Main", .irq_mask = mpc52xx_main_mask, .irq_mask_ack = mpc52xx_main_mask, .irq_unmask = mpc52xx_main_unmask, .irq_set_type = mpc52xx_null_set_type, }; /* * Peripherals interrupt irq_chip */ static void mpc52xx_periph_mask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&intr->per_mask, 31 - l2irq); } static void mpc52xx_periph_unmask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&intr->per_mask, 31 - l2irq); } static struct irq_chip mpc52xx_periph_irqchip = { .name = "MPC52xx Peripherals", .irq_mask = mpc52xx_periph_mask, .irq_mask_ack = mpc52xx_periph_mask, .irq_unmask = mpc52xx_periph_unmask, 
.irq_set_type = mpc52xx_null_set_type, }; /* * SDMA interrupt irq_chip */ static void mpc52xx_sdma_mask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_setbit(&sdma->IntMask, l2irq); } static void mpc52xx_sdma_unmask(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; io_be_clrbit(&sdma->IntMask, l2irq); } static void mpc52xx_sdma_ack(struct irq_data *d) { int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; out_be32(&sdma->IntPend, 1 << l2irq); } static struct irq_chip mpc52xx_sdma_irqchip = { .name = "MPC52xx SDMA", .irq_mask = mpc52xx_sdma_mask, .irq_unmask = mpc52xx_sdma_unmask, .irq_ack = mpc52xx_sdma_ack, .irq_set_type = mpc52xx_null_set_type, }; /** * mpc52xx_is_extirq - Returns true if hwirq number is for an external IRQ */ static int mpc52xx_is_extirq(int l1, int l2) { return ((l1 == 0) && (l2 == 0)) || ((l1 == 1) && (l2 >= 1) && (l2 <= 3)); } /** * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property */ static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { int intrvect_l1; int intrvect_l2; int intrvect_type; int intrvect_linux; if (intsize != 3) return -1; intrvect_l1 = (int)intspec[0]; intrvect_l2 = (int)intspec[1]; intrvect_type = (int)intspec[2] & 0x3; intrvect_linux = (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK; intrvect_linux |= intrvect_l2 & MPC52xx_IRQ_L2_MASK; *out_hwirq = intrvect_linux; *out_flags = IRQ_TYPE_LEVEL_LOW; if (mpc52xx_is_extirq(intrvect_l1, intrvect_l2)) *out_flags = mpc52xx_map_senses[intrvect_type]; pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1, intrvect_l2); return 0; } /** * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure */ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t irq) { int l1irq; int l2irq; struct irq_chip *irqchip; void 
*hndlr; int type; u32 reg; l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET; l2irq = irq & MPC52xx_IRQ_L2_MASK; /* * External IRQs are handled differently by the hardware so they are * handled by a dedicated irq_chip structure. */ if (mpc52xx_is_extirq(l1irq, l2irq)) { reg = in_be32(&intr->ctrl); type = mpc52xx_map_senses[(reg >> (22 - l2irq * 2)) & 0x3]; if ((type == IRQ_TYPE_EDGE_FALLING) || (type == IRQ_TYPE_EDGE_RISING)) hndlr = handle_edge_irq; else hndlr = handle_level_irq; irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", __func__, l2irq, virq, (int)irq, type); return 0; } /* It is an internal SOC irq. Choose the correct irq_chip */ switch (l1irq) { case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break; case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; default: pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); return -EINVAL; } irq_set_chip_and_handler(virq, irqchip, handle_level_irq); pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); return 0; } static const struct irq_domain_ops mpc52xx_irqhost_ops = { .xlate = mpc52xx_irqhost_xlate, .map = mpc52xx_irqhost_map, }; /** * mpc52xx_init_irq - Initialize and register with the virq subsystem * * Hook for setting up IRQs on an mpc5200 system. A pointer to this function * is to be put into the machine definition structure. * * This function searches the device tree for an MPC5200 interrupt controller, * initializes it, and registers it with the virq subsystem. */ void __init mpc52xx_init_irq(void) { u32 intr_ctrl; struct device_node *picnode; struct device_node *np; /* Remap the necessary zones */ picnode = of_find_matching_node(NULL, mpc52xx_pic_ids); intr = of_iomap(picnode, 0); if (!intr) panic(__FILE__ ": find_and_map failed on 'mpc5200-pic'. 
" "Check node !"); np = of_find_matching_node(NULL, mpc52xx_sdma_ids); sdma = of_iomap(np, 0); of_node_put(np); if (!sdma) panic(__FILE__ ": find_and_map failed on 'mpc5200-bestcomm'. " "Check node !"); pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr); /* Disable all interrupt sources. */ out_be32(&sdma->IntPend, 0xffffffff); /* 1 means clear pending */ out_be32(&sdma->IntMask, 0xffffffff); /* 1 means disabled */ out_be32(&intr->per_mask, 0x7ffffc00); /* 1 means disabled */ out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */ intr_ctrl = in_be32(&intr->ctrl); intr_ctrl &= 0x00ff0000; /* Keeps IRQ[0-3] config */ intr_ctrl |= 0x0f000000 | /* clear IRQ 0-3 */ 0x00001000 | /* MEE master external enable */ 0x00000000 | /* 0 means disable IRQ 0-3 */ 0x00000001; /* CEb route critical normally */ out_be32(&intr->ctrl, intr_ctrl); /* Zero a bunch of the priority settings. */ out_be32(&intr->per_pri1, 0); out_be32(&intr->per_pri2, 0); out_be32(&intr->per_pri3, 0); out_be32(&intr->main_pri1, 0); out_be32(&intr->main_pri2, 0); /* * As last step, add an irq host to translate the real * hw irq information provided by the ofw to linux virq */ mpc52xx_irqhost = irq_domain_add_linear(picnode, MPC52xx_IRQ_HIGHTESTHWIRQ, &mpc52xx_irqhost_ops, NULL); if (!mpc52xx_irqhost) panic(__FILE__ ": Cannot allocate the IRQ host\n"); irq_set_default_host(mpc52xx_irqhost); pr_info("MPC52xx PIC is up and running!\n"); } /** * mpc52xx_get_irq - Get pending interrupt number hook function * * Called by the interrupt handler to determine what IRQ handler needs to be * executed. * * Status of pending interrupts is determined by reading the encoded status * register. The encoded status register has three fields; one for each of the * types of interrupts defined by the controller - 'critical', 'main' and * 'peripheral'. This function reads the status register and returns the IRQ * number associated with the highest priority pending interrupt. 
'Critical' * interrupts have the highest priority, followed by 'main' interrupts, and * then 'peripheral'. * * The mpc5200 interrupt controller can be configured to boost the priority * of individual 'peripheral' interrupts. If this is the case then a special * value will appear in either the crit or main fields indicating a high * or medium priority peripheral irq has occurred. * * This function checks each of the 3 irq request fields and returns the * first pending interrupt that it finds. * * This function also identifies a 4th type of interrupt; 'bestcomm'. Each * bestcomm DMA task can raise the bestcomm peripheral interrupt. When this * occurs at task-specific IRQ# is decoded so that each task can have its * own IRQ handler. */ unsigned int mpc52xx_get_irq(void) { u32 status; int irq; status = in_be32(&intr->enc_status); if (status & 0x00000400) { /* critical */ irq = (status >> 8) & 0x3; if (irq == 2) /* high priority peripheral */ goto peripheral; irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET); } else if (status & 0x00200000) { /* main */ irq = (status >> 16) & 0x1f; if (irq == 4) /* low priority peripheral */ goto peripheral; irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET); } else if (status & 0x20000000) { /* peripheral */ peripheral: irq = (status >> 24) & 0x1f; if (irq == 0) { /* bestcomm */ status = in_be32(&sdma->IntPend); irq = ffs(status) - 1; irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET); } else { irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); } } else { return NO_IRQ; } return irq_linear_revmap(mpc52xx_irqhost, irq); }
gpl-2.0
varund7726/android_kernel_oneplus_msm8974
arch/x86/kernel/step.c
4572
5655
/* * x86 single-step support code, common to 32-bit and 64-bit. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <asm/desc.h> unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->ip; seg = regs->cs & 0xffff; if (v8086_mode(regs)) { addr = (addr & 0xffff) + (seg << 4); return addr; } /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { struct desc_struct *desc; unsigned long base; seg &= ~7UL; mutex_lock(&child->mm->context.lock); if (unlikely((seg >> 3) >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { desc = child->mm->context.ldt + seg; base = get_desc_base(desc); /* 16-bit code segment? */ if (!desc->d) addr &= 0xffff; addr += base; } mutex_unlock(&child->mm->context.lock); } return addr; } static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) { int i, copied; unsigned char opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { /* popf and iret */ case 0x9d: case 0xcf: return 1; /* CHECKME: 64 65 */ /* opcode and address size prefixes */ case 0x66: case 0x67: continue; /* irrelevant prefixes (segment overrides and repeats) */ case 0x26: case 0x2e: case 0x36: case 0x3e: case 0x64: case 0x65: case 0xf0: case 0xf2: case 0xf3: continue; #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: if (!user_64bit_mode(regs)) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ continue; #endif /* CHECKME: f2, f3 */ /* * pushf: NOTE! We should probably not let * the user see the TF bit being set. 
But * it's more pain than it's worth to avoid * it, and a debugger could emulate this * all in user space if it _really_ cares. */ case 0x9c: default: return 0; } } return 0; } /* * Enable single-stepping. Return nonzero if user mode is not using TF itself. */ static int enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); unsigned long oflags; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state so we don't wrongly set TIF_FORCED_TF below. * If enable_single_step() was used last and that is what * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are * already set and our bookkeeping is fine. */ if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP))) regs->flags |= X86_EFLAGS_TF; /* * Always set TIF_SINGLESTEP - this guarantees that * we single-step system calls etc.. This will also * cause us to set TF when returning to user mode. */ set_tsk_thread_flag(child, TIF_SINGLESTEP); oflags = regs->flags; /* Set TF on the kernel stack.. */ regs->flags |= X86_EFLAGS_TF; /* * ..but if TF is changed by the instruction we will trace, * don't mark it as being "us" that set it, so that we * won't clear it by hand later. * * Note that if we don't actually execute the popf because * of a signal arriving right now or suchlike, we will lose * track of the fact that it really was "us" that set it. */ if (is_setting_trap_flag(child, regs)) { clear_tsk_thread_flag(child, TIF_FORCED_TF); return 0; } /* * If TF was already set, check whether it was us who set it. * If not, we should never attempt a block step. */ if (oflags & X86_EFLAGS_TF) return test_tsk_thread_flag(child, TIF_FORCED_TF); set_tsk_thread_flag(child, TIF_FORCED_TF); return 1; } /* * Enable single or block step. 
*/ static void enable_step(struct task_struct *child, bool block) { /* * Make sure block stepping (BTF) is not enabled unless it should be. * Note that we don't try to worry about any is_setting_trap_flag() * instructions after the first when using block stepping. * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ if (enable_single_step(child) && block) { unsigned long debugctl = get_debugctlmsr(); debugctl |= DEBUGCTLMSR_BTF; update_debugctlmsr(debugctl); set_tsk_thread_flag(child, TIF_BLOCKSTEP); } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) { unsigned long debugctl = get_debugctlmsr(); debugctl &= ~DEBUGCTLMSR_BTF; update_debugctlmsr(debugctl); clear_tsk_thread_flag(child, TIF_BLOCKSTEP); } } void user_enable_single_step(struct task_struct *child) { enable_step(child, 0); } void user_enable_block_step(struct task_struct *child) { enable_step(child, 1); } void user_disable_single_step(struct task_struct *child) { /* * Make sure block stepping (BTF) is disabled. */ if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) { unsigned long debugctl = get_debugctlmsr(); debugctl &= ~DEBUGCTLMSR_BTF; update_debugctlmsr(debugctl); clear_tsk_thread_flag(child, TIF_BLOCKSTEP); } /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); /* But touch TF only if it was set by us.. */ if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) task_pt_regs(child)->flags &= ~X86_EFLAGS_TF; }
gpl-2.0
mlachwani/Android_4.4.2_MotoG_Kernel
arch/arm/mach-mxs/mach-tx28.c
4828
5131
/* * Copyright (C) 2010 <LW@KARO-electronics.de> * * based on: mach-mx28_evk.c * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation */ #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_gpio.h> #include <linux/i2c.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <mach/common.h> #include <mach/iomux-mx28.h> #include "devices-mx28.h" #include "module-tx28.h" #define TX28_STK5_GPIO_LED MXS_GPIO_NR(4, 10) static const iomux_cfg_t tx28_stk5v3_pads[] __initconst = { /* LED */ MX28_PAD_ENET0_RXD3__GPIO_4_10 | MXS_PAD_3V3 | MXS_PAD_4MA | MXS_PAD_NOPULL, /* framebuffer */ #define LCD_MODE (MXS_PAD_3V3 | MXS_PAD_4MA) MX28_PAD_LCD_D00__LCD_D0 | LCD_MODE, MX28_PAD_LCD_D01__LCD_D1 | LCD_MODE, MX28_PAD_LCD_D02__LCD_D2 | LCD_MODE, MX28_PAD_LCD_D03__LCD_D3 | LCD_MODE, MX28_PAD_LCD_D04__LCD_D4 | LCD_MODE, MX28_PAD_LCD_D05__LCD_D5 | LCD_MODE, MX28_PAD_LCD_D06__LCD_D6 | LCD_MODE, MX28_PAD_LCD_D07__LCD_D7 | LCD_MODE, MX28_PAD_LCD_D08__LCD_D8 | LCD_MODE, MX28_PAD_LCD_D09__LCD_D9 | LCD_MODE, MX28_PAD_LCD_D10__LCD_D10 | LCD_MODE, MX28_PAD_LCD_D11__LCD_D11 | LCD_MODE, MX28_PAD_LCD_D12__LCD_D12 | LCD_MODE, MX28_PAD_LCD_D13__LCD_D13 | LCD_MODE, MX28_PAD_LCD_D14__LCD_D14 | LCD_MODE, MX28_PAD_LCD_D15__LCD_D15 | LCD_MODE, MX28_PAD_LCD_D16__LCD_D16 | LCD_MODE, MX28_PAD_LCD_D17__LCD_D17 | LCD_MODE, MX28_PAD_LCD_D18__LCD_D18 | LCD_MODE, MX28_PAD_LCD_D19__LCD_D19 | LCD_MODE, MX28_PAD_LCD_D20__LCD_D20 | LCD_MODE, MX28_PAD_LCD_D21__LCD_D21 | LCD_MODE, MX28_PAD_LCD_D22__LCD_D22 | LCD_MODE, MX28_PAD_LCD_D23__LCD_D23 | LCD_MODE, MX28_PAD_LCD_RD_E__LCD_VSYNC | LCD_MODE, MX28_PAD_LCD_WR_RWN__LCD_HSYNC | LCD_MODE, MX28_PAD_LCD_RS__LCD_DOTCLK | LCD_MODE, MX28_PAD_LCD_CS__LCD_CS | 
LCD_MODE, MX28_PAD_LCD_VSYNC__LCD_VSYNC | LCD_MODE, MX28_PAD_LCD_HSYNC__LCD_HSYNC | LCD_MODE, MX28_PAD_LCD_DOTCLK__LCD_DOTCLK | LCD_MODE, MX28_PAD_LCD_ENABLE__GPIO_1_31 | LCD_MODE, MX28_PAD_LCD_RESET__GPIO_3_30 | LCD_MODE, MX28_PAD_PWM0__PWM_0 | LCD_MODE, /* UART1 */ MX28_PAD_AUART0_CTS__DUART_RX, MX28_PAD_AUART0_RTS__DUART_TX, MX28_PAD_AUART0_TX__DUART_RTS, MX28_PAD_AUART0_RX__DUART_CTS, /* UART2 */ MX28_PAD_AUART1_RX__AUART1_RX, MX28_PAD_AUART1_TX__AUART1_TX, MX28_PAD_AUART1_RTS__AUART1_RTS, MX28_PAD_AUART1_CTS__AUART1_CTS, /* CAN */ MX28_PAD_GPMI_RDY2__CAN0_TX, MX28_PAD_GPMI_RDY3__CAN0_RX, /* I2C */ MX28_PAD_I2C0_SCL__I2C0_SCL, MX28_PAD_I2C0_SDA__I2C0_SDA, /* TSC2007 */ MX28_PAD_SAIF0_MCLK__GPIO_3_20 | MXS_PAD_3V3 | MXS_PAD_4MA | MXS_PAD_PULLUP, /* MMC0 */ MX28_PAD_SSP0_DATA0__SSP0_D0 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA1__SSP0_D1 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA2__SSP0_D2 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DATA3__SSP0_D3 | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_CMD__SSP0_CMD | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_PULLUP), MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT | (MXS_PAD_8MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), MX28_PAD_SSP0_SCK__SSP0_SCK | (MXS_PAD_12MA | MXS_PAD_3V3 | MXS_PAD_NOPULL), }; static const struct gpio_led tx28_stk5v3_leds[] __initconst = { { .name = "GPIO-LED", .default_trigger = "heartbeat", .gpio = TX28_STK5_GPIO_LED, }, }; static const struct gpio_led_platform_data tx28_stk5v3_led_data __initconst = { .leds = tx28_stk5v3_leds, .num_leds = ARRAY_SIZE(tx28_stk5v3_leds), }; static struct spi_board_info tx28_spi_board_info[] = { { .modalias = "spidev", .max_speed_hz = 20000000, .bus_num = 0, .chip_select = 1, .controller_data = (void *)SPI_GPIO_NO_CHIPSELECT, .mode = SPI_MODE_0, }, }; static struct i2c_board_info tx28_stk5v3_i2c_boardinfo[] __initdata = { { I2C_BOARD_INFO("ds1339", 0x68), }, }; static struct mxs_mmc_platform_data 
tx28_mmc0_pdata __initdata = { .wp_gpio = -EINVAL, .flags = SLOTF_4_BIT_CAPABLE, }; static void __init tx28_stk5v3_init(void) { mxs_iomux_setup_multiple_pads(tx28_stk5v3_pads, ARRAY_SIZE(tx28_stk5v3_pads)); mx28_add_duart(); /* UART1 */ mx28_add_auart(1); /* UART2 */ tx28_add_fec0(); /* spi via ssp will be added when available */ spi_register_board_info(tx28_spi_board_info, ARRAY_SIZE(tx28_spi_board_info)); gpio_led_register_device(0, &tx28_stk5v3_led_data); mx28_add_mxs_i2c(0); i2c_register_board_info(0, tx28_stk5v3_i2c_boardinfo, ARRAY_SIZE(tx28_stk5v3_i2c_boardinfo)); mx28_add_mxs_mmc(0, &tx28_mmc0_pdata); mx28_add_rtc_stmp3xxx(); } static void __init tx28_timer_init(void) { mx28_clocks_init(); } static struct sys_timer tx28_timer = { .init = tx28_timer_init, }; MACHINE_START(TX28, "Ka-Ro electronics TX28 module") .map_io = mx28_map_io, .init_irq = mx28_init_irq, .timer = &tx28_timer, .init_machine = tx28_stk5v3_init, .restart = mxs_restart, MACHINE_END
gpl-2.0
shankarathi07/linux_motorola_lollipop
drivers/mtd/lpddr/lpddr_cmds.c
7388
20629
/* * LPDDR flash memory device operations. This module provides read, write, * erase, lock/unlock support for LPDDR flash memories * (C) 2008 Korolev Alexey <akorolev@infradead.org> * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com> * Many thanks to Roman Borisov for initial enabling * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * TODO: * Implement VPP management * Implement XIP support * Implement OTP support */ #include <linux/mtd/pfow.h> #include <linux/mtd/qinfo.h> #include <linux/slab.h> #include <linux/module.h> static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, size_t *retlen, u_char *buf); static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr); static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, size_t *retlen, void **mtdbuf, resource_size_t *phys); static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); static int get_chip(struct map_info *map, struct flchip *chip, 
int mode); static int chip_ready(struct map_info *map, struct flchip *chip, int mode); static void put_chip(struct map_info *map, struct flchip *chip); struct mtd_info *lpddr_cmdset(struct map_info *map) { struct lpddr_private *lpddr = map->fldrv_priv; struct flchip_shared *shared; struct flchip *chip; struct mtd_info *mtd; int numchips; int i, j; mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); if (!mtd) { printk(KERN_ERR "Failed to allocate memory for MTD device\n"); return NULL; } mtd->priv = map; mtd->type = MTD_NORFLASH; /* Fill in the default mtd operations */ mtd->_read = lpddr_read; mtd->type = MTD_NORFLASH; mtd->flags = MTD_CAP_NORFLASH; mtd->flags &= ~MTD_BIT_WRITEABLE; mtd->_erase = lpddr_erase; mtd->_write = lpddr_write_buffers; mtd->_writev = lpddr_writev; mtd->_lock = lpddr_lock; mtd->_unlock = lpddr_unlock; if (map_is_linear(map)) { mtd->_point = lpddr_point; mtd->_unpoint = lpddr_unpoint; } mtd->size = 1 << lpddr->qinfo->DevSizeShift; mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; mtd->writesize = 1 << lpddr->qinfo->BufSizeShift; shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips, GFP_KERNEL); if (!shared) { kfree(lpddr); kfree(mtd); return NULL; } chip = &lpddr->chips[0]; numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum; for (i = 0; i < numchips; i++) { shared[i].writing = shared[i].erasing = NULL; mutex_init(&shared[i].lock); for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) { *chip = lpddr->chips[i]; chip->start += j << lpddr->chipshift; chip->oldstate = chip->state = FL_READY; chip->priv = &shared[i]; /* those should be reset too since they create memory references. 
*/ init_waitqueue_head(&chip->wq); mutex_init(&chip->mutex); chip++; } } return mtd; } EXPORT_SYMBOL(lpddr_cmdset); static int wait_for_ready(struct map_info *map, struct flchip *chip, unsigned int chip_op_time) { unsigned int timeo, reset_timeo, sleep_time; unsigned int dsr; flstate_t chip_state = chip->state; int ret = 0; /* set our timeout to 8 times the expected delay */ timeo = chip_op_time * 8; if (!timeo) timeo = 500000; reset_timeo = timeo; sleep_time = chip_op_time / 2; for (;;) { dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR)); if (dsr & DSR_READY_STATUS) break; if (!timeo) { printk(KERN_ERR "%s: Flash timeout error state %d \n", map->name, chip_state); ret = -ETIME; break; } /* OK Still waiting. Drop the lock, wait a while and retry. */ mutex_unlock(&chip->mutex); if (sleep_time >= 1000000/HZ) { /* * Half of the normal delay still remaining * can be performed with a sleeping delay instead * of busy waiting. */ msleep(sleep_time/1000); timeo -= sleep_time; sleep_time = 1000000/HZ; } else { udelay(1); cond_resched(); timeo--; } mutex_lock(&chip->mutex); while (chip->state != chip_state) { /* Someone's suspended the operation: sleep */ DECLARE_WAITQUEUE(wait, current); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); mutex_lock(&chip->mutex); } if (chip->erase_suspended || chip->write_suspended) { /* Suspend has occurred while sleep: reset timeout */ timeo = reset_timeo; chip->erase_suspended = chip->write_suspended = 0; } } /* check status for errors */ if (dsr & DSR_ERR) { /* Clear DSR*/ map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR); printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n", map->name, dsr); print_drs_error(dsr); ret = -EIO; } chip->state = FL_READY; return ret; } static int get_chip(struct map_info *map, struct flchip *chip, int mode) { int ret; DECLARE_WAITQUEUE(wait, current); retry: if (chip->priv && (mode == 
FL_WRITING || mode == FL_ERASING) && chip->state != FL_SYNCING) { /* * OK. We have possibility for contension on the write/erase * operations which are global to the real chip and not per * partition. So let's fight it over in the partition which * currently has authority on the operation. * * The rules are as follows: * * - any write operation must own shared->writing. * * - any erase operation must own _both_ shared->writing and * shared->erasing. * * - contension arbitration is handled in the owner's context. * * The 'shared' struct can be read and/or written only when * its lock is taken. */ struct flchip_shared *shared = chip->priv; struct flchip *contender; mutex_lock(&shared->lock); contender = shared->writing; if (contender && contender != chip) { /* * The engine to perform desired operation on this * partition is already in use by someone else. * Let's fight over it in the context of the chip * currently using it. If it is possible to suspend, * that other partition will do just that, otherwise * it'll happily send us to sleep. In any case, when * get_chip returns success we're clear to go ahead. */ ret = mutex_trylock(&contender->mutex); mutex_unlock(&shared->lock); if (!ret) goto retry; mutex_unlock(&chip->mutex); ret = chip_ready(map, contender, mode); mutex_lock(&chip->mutex); if (ret == -EAGAIN) { mutex_unlock(&contender->mutex); goto retry; } if (ret) { mutex_unlock(&contender->mutex); return ret; } mutex_lock(&shared->lock); /* We should not own chip if it is already in FL_SYNCING * state. Put contender and retry. */ if (chip->state == FL_SYNCING) { put_chip(map, contender); mutex_unlock(&contender->mutex); goto retry; } mutex_unlock(&contender->mutex); } /* Check if we have suspended erase on this chip. Must sleep in such a case. 
*/ if (mode == FL_ERASING && shared->erasing && shared->erasing->oldstate == FL_ERASING) { mutex_unlock(&shared->lock); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); mutex_lock(&chip->mutex); goto retry; } /* We now own it */ shared->writing = chip; if (mode == FL_ERASING) shared->erasing = chip; mutex_unlock(&shared->lock); } ret = chip_ready(map, chip, mode); if (ret == -EAGAIN) goto retry; return ret; } static int chip_ready(struct map_info *map, struct flchip *chip, int mode) { struct lpddr_private *lpddr = map->fldrv_priv; int ret = 0; DECLARE_WAITQUEUE(wait, current); /* Prevent setting state FL_SYNCING for chip in suspended state. */ if (FL_SYNCING == mode && FL_READY != chip->oldstate) goto sleep; switch (chip->state) { case FL_READY: case FL_JEDEC_QUERY: return 0; case FL_ERASING: if (!lpddr->qinfo->SuspEraseSupp || !(mode == FL_READY || mode == FL_POINT)) goto sleep; map_write(map, CMD(LPDDR_SUSPEND), map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND); chip->oldstate = FL_ERASING; chip->state = FL_ERASE_SUSPENDING; ret = wait_for_ready(map, chip, 0); if (ret) { /* Oops. something got wrong. */ /* Resume and pretend we weren't here. */ put_chip(map, chip); printk(KERN_ERR "%s: suspend operation failed." "State may be wrong \n", map->name); return -EIO; } chip->erase_suspended = 1; chip->state = FL_READY; return 0; /* Erase suspend */ case FL_POINT: /* Only if there's no operation suspended... 
*/ if (mode == FL_READY && chip->oldstate == FL_READY) return 0; default: sleep: set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); mutex_lock(&chip->mutex); return -EAGAIN; } } static void put_chip(struct map_info *map, struct flchip *chip) { if (chip->priv) { struct flchip_shared *shared = chip->priv; mutex_lock(&shared->lock); if (shared->writing == chip && chip->oldstate == FL_READY) { /* We own the ability to write, but we're done */ shared->writing = shared->erasing; if (shared->writing && shared->writing != chip) { /* give back the ownership */ struct flchip *loaner = shared->writing; mutex_lock(&loaner->mutex); mutex_unlock(&shared->lock); mutex_unlock(&chip->mutex); put_chip(map, loaner); mutex_lock(&chip->mutex); mutex_unlock(&loaner->mutex); wake_up(&chip->wq); return; } shared->erasing = NULL; shared->writing = NULL; } else if (shared->erasing == chip && shared->writing != chip) { /* * We own the ability to erase without the ability * to write, which means the erase was suspended * and some other partition is currently writing. * Don't let the switch below mess things up since * we don't have ownership to resume anything. 
*/ mutex_unlock(&shared->lock); wake_up(&chip->wq); return; } mutex_unlock(&shared->lock); } switch (chip->oldstate) { case FL_ERASING: map_write(map, CMD(LPDDR_RESUME), map->pfow_base + PFOW_COMMAND_CODE); map_write(map, CMD(LPDDR_START_EXECUTION), map->pfow_base + PFOW_COMMAND_EXECUTE); chip->oldstate = FL_READY; chip->state = FL_ERASING; break; case FL_READY: break; default: printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n", map->name, chip->oldstate); } wake_up(&chip->wq); } int do_write_buffer(struct map_info *map, struct flchip *chip, unsigned long adr, const struct kvec **pvec, unsigned long *pvec_seek, int len) { struct lpddr_private *lpddr = map->fldrv_priv; map_word datum; int ret, wbufsize, word_gap, words; const struct kvec *vec; unsigned long vec_seek; unsigned long prog_buf_ofs; wbufsize = 1 << lpddr->qinfo->BufSizeShift; mutex_lock(&chip->mutex); ret = get_chip(map, chip, FL_WRITING); if (ret) { mutex_unlock(&chip->mutex); return ret; } /* Figure out the number of words to write */ word_gap = (-adr & (map_bankwidth(map)-1)); words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map); if (!word_gap) { words--; } else { word_gap = map_bankwidth(map) - word_gap; adr -= word_gap; datum = map_word_ff(map); } /* Write data */ /* Get the program buffer offset from PFOW register data first*/ prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map, map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET)); vec = *pvec; vec_seek = *pvec_seek; do { int n = map_bankwidth(map) - word_gap; if (n > vec->iov_len - vec_seek) n = vec->iov_len - vec_seek; if (n > len) n = len; if (!word_gap && (len < map_bankwidth(map))) datum = map_word_ff(map); datum = map_word_load_partial(map, datum, vec->iov_base + vec_seek, word_gap, n); len -= n; word_gap += n; if (!len || word_gap == map_bankwidth(map)) { map_write(map, datum, prog_buf_ofs); prog_buf_ofs += map_bankwidth(map); word_gap = 0; } vec_seek += n; if (vec_seek == vec->iov_len) { vec++; vec_seek = 0; } } while 
(len); *pvec = vec; *pvec_seek = vec_seek; /* GO GO GO */ send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL); chip->state = FL_WRITING; ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime)); if (ret) { printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n", map->name, ret, adr); goto out; } out: put_chip(map, chip); mutex_unlock(&chip->mutex); return ret; } int do_erase_oneblock(struct mtd_info *mtd, loff_t adr) { struct map_info *map = mtd->priv; struct lpddr_private *lpddr = map->fldrv_priv; int chipnum = adr >> lpddr->chipshift; struct flchip *chip = &lpddr->chips[chipnum]; int ret; mutex_lock(&chip->mutex); ret = get_chip(map, chip, FL_ERASING); if (ret) { mutex_unlock(&chip->mutex); return ret; } send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); chip->state = FL_ERASING; ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000); if (ret) { printk(KERN_WARNING"%s Erase block error %d at : %llx\n", map->name, ret, adr); goto out; } out: put_chip(map, chip); mutex_unlock(&chip->mutex); return ret; } static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len, size_t *retlen, u_char *buf) { struct map_info *map = mtd->priv; struct lpddr_private *lpddr = map->fldrv_priv; int chipnum = adr >> lpddr->chipshift; struct flchip *chip = &lpddr->chips[chipnum]; int ret = 0; mutex_lock(&chip->mutex); ret = get_chip(map, chip, FL_READY); if (ret) { mutex_unlock(&chip->mutex); return ret; } map_copy_from(map, buf, adr, len); *retlen = len; put_chip(map, chip); mutex_unlock(&chip->mutex); return ret; } static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, size_t *retlen, void **mtdbuf, resource_size_t *phys) { struct map_info *map = mtd->priv; struct lpddr_private *lpddr = map->fldrv_priv; int chipnum = adr >> lpddr->chipshift; unsigned long ofs, last_end = 0; struct flchip *chip = &lpddr->chips[chipnum]; int ret = 0; if (!map->virt) return -EINVAL; /* ofs: offset within the first chip that the first 
read should start */ ofs = adr - (chipnum << lpddr->chipshift); *mtdbuf = (void *)map->virt + chip->start + ofs; while (len) { unsigned long thislen; if (chipnum >= lpddr->numchips) break; /* We cannot point across chips that are virtually disjoint */ if (!last_end) last_end = chip->start; else if (chip->start != last_end) break; if ((len + ofs - 1) >> lpddr->chipshift) thislen = (1<<lpddr->chipshift) - ofs; else thislen = len; /* get the chip */ mutex_lock(&chip->mutex); ret = get_chip(map, chip, FL_POINT); mutex_unlock(&chip->mutex); if (ret) break; chip->state = FL_POINT; chip->ref_point_counter++; *retlen += thislen; len -= thislen; ofs = 0; last_end += 1 << lpddr->chipshift; chipnum++; chip = &lpddr->chips[chipnum]; } return 0; } static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) { struct map_info *map = mtd->priv; struct lpddr_private *lpddr = map->fldrv_priv; int chipnum = adr >> lpddr->chipshift, err = 0; unsigned long ofs; /* ofs: offset within the first chip that the first read should start */ ofs = adr - (chipnum << lpddr->chipshift); while (len) { unsigned long thislen; struct flchip *chip; chip = &lpddr->chips[chipnum]; if (chipnum >= lpddr->numchips) break; if ((len + ofs - 1) >> lpddr->chipshift) thislen = (1<<lpddr->chipshift) - ofs; else thislen = len; mutex_lock(&chip->mutex); if (chip->state == FL_POINT) { chip->ref_point_counter--; if (chip->ref_point_counter == 0) chip->state = FL_READY; } else { printk(KERN_WARNING "%s: Warning: unpoint called on non" "pointed region\n", map->name); err = -EINVAL; } put_chip(map, chip); mutex_unlock(&chip->mutex); len -= thislen; ofs = 0; chipnum++; } return err; } static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct kvec vec; vec.iov_base = (void *) buf; vec.iov_len = len; return lpddr_writev(mtd, &vec, 1, to, retlen); } static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, 
size_t *retlen) { struct map_info *map = mtd->priv; struct lpddr_private *lpddr = map->fldrv_priv; int ret = 0; int chipnum; unsigned long ofs, vec_seek, i; int wbufsize = 1 << lpddr->qinfo->BufSizeShift; size_t len = 0; for (i = 0; i < count; i++) len += vecs[i].iov_len; if (!len) return 0; chipnum = to >> lpddr->chipshift; ofs = to; vec_seek = 0; do { /* We must not cross write block boundaries */ int size = wbufsize - (ofs & (wbufsize-1)); if (size > len) size = len; ret = do_write_buffer(map, &lpddr->chips[chipnum], ofs, &vecs, &vec_seek, size); if (ret) return ret; ofs += size; (*retlen) += size; len -= size; /* Be nice and reschedule with the chip in a usable * state for other processes */ cond_resched(); } while (len); return 0; } static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr) { unsigned long ofs, len; int ret; struct map_info *map = mtd->priv; struct lpddr_private *lpddr = map->fldrv_priv; int size = 1 << lpddr->qinfo->UniformBlockSizeShift; ofs = instr->addr; len = instr->len; while (len > 0) { ret = do_erase_oneblock(mtd, ofs); if (ret) return ret; ofs += size; len -= size; } instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); return 0; } #define DO_XXLOCK_LOCK 1 #define DO_XXLOCK_UNLOCK 2 int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk) { int ret = 0; struct map_info *map = mtd->priv; struct lpddr_private *lpddr = map->fldrv_priv; int chipnum = adr >> lpddr->chipshift; struct flchip *chip = &lpddr->chips[chipnum]; mutex_lock(&chip->mutex); ret = get_chip(map, chip, FL_LOCKING); if (ret) { mutex_unlock(&chip->mutex); return ret; } if (thunk == DO_XXLOCK_LOCK) { send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL); chip->state = FL_LOCKING; } else if (thunk == DO_XXLOCK_UNLOCK) { send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL); chip->state = FL_UNLOCKING; } else BUG(); ret = wait_for_ready(map, chip, 1); if (ret) { printk(KERN_ERR "%s: block unlock error status %d \n", 
map->name, ret); goto out; } out: put_chip(map, chip); mutex_unlock(&chip->mutex); return ret; } static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK); } static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK); } int word_program(struct map_info *map, loff_t adr, uint32_t curval) { int ret; struct lpddr_private *lpddr = map->fldrv_priv; int chipnum = adr >> lpddr->chipshift; struct flchip *chip = &lpddr->chips[chipnum]; mutex_lock(&chip->mutex); ret = get_chip(map, chip, FL_WRITING); if (ret) { mutex_unlock(&chip->mutex); return ret; } send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval); ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime)); if (ret) { printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n", map->name, adr, curval); goto out; } out: put_chip(map, chip); mutex_unlock(&chip->mutex); return ret; } MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>"); MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");
gpl-2.0
walter79/android_kernel_sony_tsubasa
net/rose/rose_link.c
7900
6932
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/netfilter.h> #include <net/rose.h> static void rose_ftimer_expiry(unsigned long); static void rose_t0timer_expiry(unsigned long); static void rose_transmit_restart_confirmation(struct rose_neigh *neigh); static void rose_transmit_restart_request(struct rose_neigh *neigh); void rose_start_ftimer(struct rose_neigh *neigh) { del_timer(&neigh->ftimer); neigh->ftimer.data = (unsigned long)neigh; neigh->ftimer.function = &rose_ftimer_expiry; neigh->ftimer.expires = jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout); add_timer(&neigh->ftimer); } static void rose_start_t0timer(struct rose_neigh *neigh) { del_timer(&neigh->t0timer); neigh->t0timer.data = (unsigned long)neigh; neigh->t0timer.function = &rose_t0timer_expiry; neigh->t0timer.expires = jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout); add_timer(&neigh->t0timer); } void rose_stop_ftimer(struct rose_neigh *neigh) { del_timer(&neigh->ftimer); } void rose_stop_t0timer(struct rose_neigh *neigh) { del_timer(&neigh->t0timer); } int rose_ftimer_running(struct rose_neigh *neigh) { return timer_pending(&neigh->ftimer); } static int rose_t0timer_running(struct rose_neigh *neigh) { return 
timer_pending(&neigh->t0timer); } static void rose_ftimer_expiry(unsigned long param) { } static void rose_t0timer_expiry(unsigned long param) { struct rose_neigh *neigh = (struct rose_neigh *)param; rose_transmit_restart_request(neigh); neigh->dce_mode = 0; rose_start_t0timer(neigh); } /* * Interface to ax25_send_frame. Changes my level 2 callsign depending * on whether we have a global ROSE callsign or use the default port * callsign. */ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) { ax25_address *rose_call; ax25_cb *ax25s; if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) rose_call = (ax25_address *)neigh->dev->dev_addr; else rose_call = &rose_callsign; ax25s = neigh->ax25; neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); if (ax25s) ax25_cb_put(ax25s); return neigh->ax25 != NULL; } /* * Interface to ax25_link_up. Changes my level 2 callsign depending * on whether we have a global ROSE callsign or use the default port * callsign. */ static int rose_link_up(struct rose_neigh *neigh) { ax25_address *rose_call; ax25_cb *ax25s; if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) rose_call = (ax25_address *)neigh->dev->dev_addr; else rose_call = &rose_callsign; ax25s = neigh->ax25; neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); if (ax25s) ax25_cb_put(ax25s); return neigh->ax25 != NULL; } /* * This handles all restart and diagnostic frames. 
*/ void rose_link_rx_restart(struct sk_buff *skb, struct rose_neigh *neigh, unsigned short frametype) { struct sk_buff *skbn; switch (frametype) { case ROSE_RESTART_REQUEST: rose_stop_t0timer(neigh); neigh->restarted = 1; neigh->dce_mode = (skb->data[3] == ROSE_DTE_ORIGINATED); rose_transmit_restart_confirmation(neigh); break; case ROSE_RESTART_CONFIRMATION: rose_stop_t0timer(neigh); neigh->restarted = 1; break; case ROSE_DIAGNOSTIC: printk(KERN_WARNING "ROSE: received diagnostic #%d - %02X %02X %02X\n", skb->data[3], skb->data[4], skb->data[5], skb->data[6]); break; default: printk(KERN_WARNING "ROSE: received unknown %02X with LCI 000\n", frametype); break; } if (neigh->restarted) { while ((skbn = skb_dequeue(&neigh->queue)) != NULL) if (!rose_send_frame(skbn, neigh)) kfree_skb(skbn); } } /* * This routine is called when a Restart Request is needed */ static void rose_transmit_restart_request(struct rose_neigh *neigh) { struct sk_buff *skb; unsigned char *dptr; int len; len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3; if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) return; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN); dptr = skb_put(skb, ROSE_MIN_LEN + 3); *dptr++ = AX25_P_ROSE; *dptr++ = ROSE_GFI; *dptr++ = 0x00; *dptr++ = ROSE_RESTART_REQUEST; *dptr++ = ROSE_DTE_ORIGINATED; *dptr++ = 0; if (!rose_send_frame(skb, neigh)) kfree_skb(skb); } /* * This routine is called when a Restart Confirmation is needed */ static void rose_transmit_restart_confirmation(struct rose_neigh *neigh) { struct sk_buff *skb; unsigned char *dptr; int len; len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1; if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) return; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN); dptr = skb_put(skb, ROSE_MIN_LEN + 1); *dptr++ = AX25_P_ROSE; *dptr++ = ROSE_GFI; *dptr++ = 0x00; *dptr++ = ROSE_RESTART_CONFIRMATION; if (!rose_send_frame(skb, neigh)) kfree_skb(skb); } /* * This routine is called when a 
Clear Request is needed outside of the context * of a connected socket. */ void rose_transmit_clear_request(struct rose_neigh *neigh, unsigned int lci, unsigned char cause, unsigned char diagnostic) { struct sk_buff *skb; unsigned char *dptr; int len; len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3; if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) return; skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN); dptr = skb_put(skb, ROSE_MIN_LEN + 3); *dptr++ = AX25_P_ROSE; *dptr++ = ((lci >> 8) & 0x0F) | ROSE_GFI; *dptr++ = ((lci >> 0) & 0xFF); *dptr++ = ROSE_CLEAR_REQUEST; *dptr++ = cause; *dptr++ = diagnostic; if (!rose_send_frame(skb, neigh)) kfree_skb(skb); } void rose_transmit_link(struct sk_buff *skb, struct rose_neigh *neigh) { unsigned char *dptr; if (neigh->loopback) { rose_loopback_queue(skb, neigh); return; } if (!rose_link_up(neigh)) neigh->restarted = 0; dptr = skb_push(skb, 1); *dptr++ = AX25_P_ROSE; if (neigh->restarted) { if (!rose_send_frame(skb, neigh)) kfree_skb(skb); } else { skb_queue_tail(&neigh->queue, skb); if (!rose_t0timer_running(neigh)) { rose_transmit_restart_request(neigh); neigh->dce_mode = 0; rose_start_t0timer(neigh); } } }
gpl-2.0
assusdan/cyanogenmod_kernel_hs_puref
Documentation/vDSO/parse_vdso.c
8412
6759
/*
 * parse_vdso.c: Linux reference vDSO parser
 * Written by Andrew Lutomirski, 2011.
 *
 * This code is meant to be linked in to various programs that run on Linux.
 * As such, it is available with as few restrictions as possible.  This file
 * is licensed under the Creative Commons Zero License, version 1.0,
 * available at http://creativecommons.org/publicdomain/zero/1.0/legalcode
 *
 * The vDSO is a regular ELF DSO that the kernel maps into user space when
 * it starts a program.  It works equally well in statically and dynamically
 * linked binaries.
 *
 * This code is tested on x86_64.  In principle it should work on any 64-bit
 * architecture that has a vDSO.
 */

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <elf.h>

/*
 * To use this vDSO parser, first call one of the vdso_init_* functions.
 * If you've already parsed auxv, then pass the value of AT_SYSINFO_EHDR
 * to vdso_init_from_sysinfo_ehdr.  Otherwise pass auxv to vdso_init_from_auxv.
 * Then call vdso_sym for each symbol you want.  For example, to look up
 * gettimeofday on x86_64, use:
 *
 *     <some pointer> = vdso_sym("LINUX_2.6", "gettimeofday");
 * or
 *     <some pointer> = vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
 *
 * vdso_sym will return 0 if the symbol doesn't exist or if the init function
 * failed or was not called.  vdso_sym is a little slow, so its return value
 * should be cached.
 *
 * vdso_sym is threadsafe; the init functions are not.
 *
 * These are the prototypes:
 */
extern void vdso_init_from_auxv(void *auxv);
extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
extern void *vdso_sym(const char *version, const char *name);

/* And here's the code. */

#ifndef __x86_64__
# error Not yet ported to non-x86_64 architectures
#endif

static struct vdso_info
{
	bool valid;

	/* Load information */
	uintptr_t load_addr;
	uintptr_t load_offset;  /* load_addr - recorded vaddr */

	/* Symbol table */
	Elf64_Sym *symtab;
	const char *symstrings;
	Elf64_Word *bucket, *chain;
	Elf64_Word nbucket, nchain;

	/* Version table */
	Elf64_Versym *versym;
	Elf64_Verdef *verdef;
} vdso_info;

/*
 * Classic System V ELF hash, straight from the ELF specification.
 *
 * Fixes vs. the original: the parameter is const char * so the call
 * sites in vdso_sym() no longer pass an incompatible pointer type
 * (they hand it C strings), and the assignment used as a condition is
 * parenthesized to state the intent explicitly.  The characters are
 * still accumulated as unsigned char, so the hash values are unchanged.
 */
static unsigned long elf_hash(const char *name)
{
	const unsigned char *s = (const unsigned char *)name;
	unsigned long h = 0, g;

	while (*s) {
		h = (h << 4) + *s++;
		if ((g = h & 0xf0000000))
			h ^= g >> 24;
		h &= ~g;
	}
	return h;
}

/*
 * Initialize the parser from the address the kernel reported in the
 * AT_SYSINFO_EHDR aux vector entry.  On any parse failure vdso_info.valid
 * stays false and vdso_sym() will return 0.
 */
void vdso_init_from_sysinfo_ehdr(uintptr_t base)
{
	size_t i;
	bool found_vaddr = false;

	vdso_info.valid = false;

	vdso_info.load_addr = base;

	Elf64_Ehdr *hdr = (Elf64_Ehdr*)base;
	Elf64_Phdr *pt = (Elf64_Phdr*)(vdso_info.load_addr + hdr->e_phoff);
	Elf64_Dyn *dyn = 0;

	/*
	 * We need two things from the segment table: the load offset
	 * and the dynamic table.
	 */
	for (i = 0; i < hdr->e_phnum; i++)
	{
		if (pt[i].p_type == PT_LOAD && !found_vaddr) {
			found_vaddr = true;
			vdso_info.load_offset =	base
				+ (uintptr_t)pt[i].p_offset
				- (uintptr_t)pt[i].p_vaddr;
		} else if (pt[i].p_type == PT_DYNAMIC) {
			dyn = (Elf64_Dyn*)(base + pt[i].p_offset);
		}
	}

	if (!found_vaddr || !dyn)
		return;  /* Failed */

	/*
	 * Fish out the useful bits of the dynamic table.
	 */
	Elf64_Word *hash = 0;
	vdso_info.symstrings = 0;
	vdso_info.symtab = 0;
	vdso_info.versym = 0;
	vdso_info.verdef = 0;
	for (i = 0; dyn[i].d_tag != DT_NULL; i++) {
		switch (dyn[i].d_tag) {
		case DT_STRTAB:
			vdso_info.symstrings = (const char *)
				((uintptr_t)dyn[i].d_un.d_ptr
				 + vdso_info.load_offset);
			break;
		case DT_SYMTAB:
			vdso_info.symtab = (Elf64_Sym *)
				((uintptr_t)dyn[i].d_un.d_ptr
				 + vdso_info.load_offset);
			break;
		case DT_HASH:
			hash = (Elf64_Word *)
				((uintptr_t)dyn[i].d_un.d_ptr
				 + vdso_info.load_offset);
			break;
		case DT_VERSYM:
			vdso_info.versym = (Elf64_Versym *)
				((uintptr_t)dyn[i].d_un.d_ptr
				 + vdso_info.load_offset);
			break;
		case DT_VERDEF:
			vdso_info.verdef = (Elf64_Verdef *)
				((uintptr_t)dyn[i].d_un.d_ptr
				 + vdso_info.load_offset);
			break;
		}
	}
	if (!vdso_info.symstrings || !vdso_info.symtab || !hash)
		return;  /* Failed */

	if (!vdso_info.verdef)
		vdso_info.versym = 0;

	/* Parse the hash table header. */
	vdso_info.nbucket = hash[0];
	vdso_info.nchain = hash[1];
	vdso_info.bucket = &hash[2];
	vdso_info.chain = &hash[vdso_info.nbucket + 2];

	/* That's all we need. */
	vdso_info.valid = true;
}

static bool vdso_match_version(Elf64_Versym ver,
			       const char *name, Elf64_Word hash)
{
	/*
	 * This is a helper function to check if the version indexed by
	 * ver matches name (which hashes to hash).
	 *
	 * The version definition table is a mess, and I don't know how
	 * to do this in better than linear time without allocating memory
	 * to build an index.  I also don't know why the table has
	 * variable size entries in the first place.
	 *
	 * For added fun, I can't find a comprehensible specification of how
	 * to parse all the weird flags in the table.
	 *
	 * So I just parse the whole table every time.
	 */

	/* First step: find the version definition */
	ver &= 0x7fff;  /* Apparently bit 15 means "hidden" */
	Elf64_Verdef *def = vdso_info.verdef;
	while(true) {
		if ((def->vd_flags & VER_FLG_BASE) == 0
		    && (def->vd_ndx & 0x7fff) == ver)
			break;

		if (def->vd_next == 0)
			return false;  /* No definition. */

		def = (Elf64_Verdef *)((char *)def + def->vd_next);
	}

	/* Now figure out whether it matches. */
	Elf64_Verdaux *aux = (Elf64_Verdaux*)((char *)def + def->vd_aux);
	return def->vd_hash == hash
		&& !strcmp(name, vdso_info.symstrings + aux->vda_name);
}

/*
 * Look up @name with version @version in the vDSO symbol table.
 * Returns the symbol's address, or 0 when the symbol is absent or the
 * parser was never successfully initialized.
 */
void *vdso_sym(const char *version, const char *name)
{
	unsigned long ver_hash;
	if (!vdso_info.valid)
		return 0;

	ver_hash = elf_hash(version);
	Elf64_Word chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket];

	for (; chain != STN_UNDEF; chain = vdso_info.chain[chain]) {
		Elf64_Sym *sym = &vdso_info.symtab[chain];

		/* Check for a defined global or weak function w/ right name. */
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;
		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
			continue;
		if (sym->st_shndx == SHN_UNDEF)
			continue;
		if (strcmp(name, vdso_info.symstrings + sym->st_name))
			continue;

		/* Check symbol version. */
		if (vdso_info.versym
		    && !vdso_match_version(vdso_info.versym[chain],
					   version, ver_hash))
			continue;

		return (void *)(vdso_info.load_offset + sym->st_value);
	}

	return 0;
}

/*
 * Scan a raw auxv block for AT_SYSINFO_EHDR and initialize from it.
 * Marks the parser invalid when the entry is missing.
 */
void vdso_init_from_auxv(void *auxv)
{
	Elf64_auxv_t *elf_auxv = auxv;
	for (int i = 0; elf_auxv[i].a_type != AT_NULL; i++)
	{
		if (elf_auxv[i].a_type == AT_SYSINFO_EHDR) {
			vdso_init_from_sysinfo_ehdr(elf_auxv[i].a_un.a_val);
			return;
		}
	}

	vdso_info.valid = false;
}
gpl-2.0
BoostPop/kernel_lge_hammerhead
drivers/misc/altera-stapl/altera-lpt.c
13020
1747
/*
 * altera-lpt.c
 *
 * altera FPGA driver
 *
 * Copyright (C) Altera Corporation 1998-2001
 * Copyright (C) 2010 NetUP Inc.
 * Copyright (C) 2010 Abylay Ospan <aospan@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/io.h>
#include <linux/kernel.h>
#include "altera-exprt.h"

/* I/O base of the parallel port carrying the ByteBlaster cable (LPT1). */
#define BYTEBLASTER_LPT_BASE	0x378

/* Set once the control register has been put into ByteBlaster mode. */
static int lpt_hardware_initialized;

/* Write @data to the LPT register at offset @port from the base address. */
static void byteblaster_write(int port, int data)
{
	outb((u8)data, (u16)(port + BYTEBLASTER_LPT_BASE));
}

/* Read the LPT register at offset @port; only the low byte is meaningful. */
static int byteblaster_read(int port)
{
	int data;

	data = inb((u16)(port + BYTEBLASTER_LPT_BASE));
	return data & 0xff;
}

/*
 * Clock one JTAG bit through the ByteBlaster.
 *
 * @device:   opaque caller context (unused by the LPT backend)
 * @tms:      value to drive on TMS
 * @tdi:      value to drive on TDI
 * @read_tdo: when non-zero, sample TDO before the clock pulse
 *
 * On first use the LPT control register is switched into ByteBlaster
 * mode.  Returns the sampled TDO bit (0 or 1), or 0 when @read_tdo is
 * not requested.
 */
int netup_jtag_io_lpt(void *device, int tms, int tdi, int read_tdo)
{
	int data = 0;
	int tdo = 0;
	int initial_lpt_ctrl = 0;

	if (!lpt_hardware_initialized) {
		initial_lpt_ctrl = byteblaster_read(2);
		byteblaster_write(2, (initial_lpt_ctrl | 0x02) & 0xdf);
		lpt_hardware_initialized = 1;
	}

	/* TDI is bit 6 of the data register, TMS is bit 1. */
	data = ((tdi ? 0x40 : 0) | (tms ? 0x02 : 0));
	byteblaster_write(0, data);

	if (read_tdo) {
		/* TDO arrives inverted on bit 7 of the status register. */
		tdo = byteblaster_read(1);
		tdo = ((tdo & 0x80) ? 0 : 1);
	}

	/* Pulse TCK (bit 0) high, then low, to clock the bit in. */
	byteblaster_write(0, data | 0x01);
	byteblaster_write(0, data);

	return tdo;
}
gpl-2.0
shumashv1/android_external_backports-wireless
drivers/gpu/drm/drm_crtc.c
221
101730
/* * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> * Copyright (c) 2008 Red Hat Inc. * * DRM core CRTC related functions * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. * * Authors: * Keith Packard * Eric Anholt <eric@anholt.net> * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> */ #include <linux/list.h> #include <linux/slab.h> #include <linux/export.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> /** * drm_modeset_lock_all - take all modeset locks * @dev: drm device * * This function takes all modeset locks, suitable where a more fine-grained * scheme isn't (yet) implemented. 
*/ void drm_modeset_lock_all(struct drm_device *dev) { struct drm_crtc *crtc; mutex_lock(&dev->mode_config.mutex); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex); } EXPORT_SYMBOL(drm_modeset_lock_all); /** * drm_modeset_unlock_all - drop all modeset locks * @dev: device */ void drm_modeset_unlock_all(struct drm_device *dev) { struct drm_crtc *crtc; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) mutex_unlock(&crtc->mutex); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_modeset_unlock_all); /** * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked * @dev: device */ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) { struct drm_crtc *crtc; /* Locking is currently fubar in the panic handler. */ if (oops_in_progress) return; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) WARN_ON(!mutex_is_locked(&crtc->mutex)); WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); } EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked); /* Avoid boilerplate. I'm tired of typing. 
*/ #define DRM_ENUM_NAME_FN(fnname, list) \ char *fnname(int val) \ { \ int i; \ for (i = 0; i < ARRAY_SIZE(list); i++) { \ if (list[i].type == val) \ return list[i].name; \ } \ return "(unknown)"; \ } /* * Global properties */ static struct drm_prop_enum_list drm_dpms_enum_list[] = { { DRM_MODE_DPMS_ON, "On" }, { DRM_MODE_DPMS_STANDBY, "Standby" }, { DRM_MODE_DPMS_SUSPEND, "Suspend" }, { DRM_MODE_DPMS_OFF, "Off" } }; DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) /* * Optional properties */ static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { { DRM_MODE_SCALE_NONE, "None" }, { DRM_MODE_SCALE_FULLSCREEN, "Full" }, { DRM_MODE_SCALE_CENTER, "Center" }, { DRM_MODE_SCALE_ASPECT, "Full aspect" }, }; static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = { { DRM_MODE_DITHERING_OFF, "Off" }, { DRM_MODE_DITHERING_ON, "On" }, { DRM_MODE_DITHERING_AUTO, "Automatic" }, }; /* * Non-global properties, but "required" for certain connectors. */ static struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ }; DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) static struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ }; DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, drm_dvi_i_subconnector_enum_list) static struct drm_prop_enum_list drm_tv_select_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* 
TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ }; DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, drm_tv_subconnector_enum_list) static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { { DRM_MODE_DIRTY_OFF, "Off" }, { DRM_MODE_DIRTY_ON, "On" }, { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, }; struct drm_conn_prop_enum_list { int type; char *name; int count; }; /* * Connector and encoder types. */ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 }, { DRM_MODE_CONNECTOR_VGA, "VGA", 0 }, { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 }, { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 }, { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 }, { DRM_MODE_CONNECTOR_Composite, "Composite", 0 }, { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, { DRM_MODE_CONNECTOR_Component, "Component", 0 }, { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, { DRM_MODE_CONNECTOR_TV, "TV", 0 }, { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, }; static struct drm_prop_enum_list drm_encoder_enum_list[] = { { DRM_MODE_ENCODER_NONE, "None" }, { DRM_MODE_ENCODER_DAC, "DAC" }, { DRM_MODE_ENCODER_TMDS, "TMDS" }, { DRM_MODE_ENCODER_LVDS, "LVDS" }, { DRM_MODE_ENCODER_TVDAC, "TV" }, { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, }; char *drm_get_encoder_name(struct drm_encoder *encoder) { static char buf[32]; snprintf(buf, 32, "%s-%d", drm_encoder_enum_list[encoder->encoder_type].name, 
encoder->base.id); return buf; } EXPORT_SYMBOL(drm_get_encoder_name); char *drm_get_connector_name(struct drm_connector *connector) { static char buf[32]; snprintf(buf, 32, "%s-%d", drm_connector_enum_list[connector->connector_type].name, connector->connector_type_id); return buf; } EXPORT_SYMBOL(drm_get_connector_name); char *drm_get_connector_status_name(enum drm_connector_status status) { if (status == connector_status_connected) return "connected"; else if (status == connector_status_disconnected) return "disconnected"; else return "unknown"; } EXPORT_SYMBOL(drm_get_connector_status_name); /** * drm_mode_object_get - allocate a new modeset identifier * @dev: DRM device * @obj: object pointer, used to generate unique ID * @obj_type: object type * * Create a unique identifier based on @ptr in @dev's identifier space. Used * for tracking modes, CRTCs and connectors. * * RETURNS: * New unique (relative to other objects in @dev) integer identifier for the * object. */ static int drm_mode_object_get(struct drm_device *dev, struct drm_mode_object *obj, uint32_t obj_type) { int ret; mutex_lock(&dev->mode_config.idr_mutex); ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL); if (ret >= 0) { /* * Set up the object linking under the protection of the idr * lock so that other users can't see inconsistent state. */ obj->id = ret; obj->type = obj_type; } mutex_unlock(&dev->mode_config.idr_mutex); return ret < 0 ? ret : 0; } /** * drm_mode_object_put - free a modeset identifer * @dev: DRM device * @object: object to free * * Free @id from @dev's unique identifier pool. 
*/ static void drm_mode_object_put(struct drm_device *dev, struct drm_mode_object *object) { mutex_lock(&dev->mode_config.idr_mutex); idr_remove(&dev->mode_config.crtc_idr, object->id); mutex_unlock(&dev->mode_config.idr_mutex); } /** * drm_mode_object_find - look up a drm object with static lifetime * @dev: drm device * @id: id of the mode object * @type: type of the mode object * * Note that framebuffers cannot be looked up with this functions - since those * are reference counted, they need special treatment. */ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) { struct drm_mode_object *obj = NULL; /* Framebuffers are reference counted and need their own lookup * function.*/ WARN_ON(type == DRM_MODE_OBJECT_FB); mutex_lock(&dev->mode_config.idr_mutex); obj = idr_find(&dev->mode_config.crtc_idr, id); if (!obj || (obj->type != type) || (obj->id != id)) obj = NULL; mutex_unlock(&dev->mode_config.idr_mutex); return obj; } EXPORT_SYMBOL(drm_mode_object_find); /** * drm_framebuffer_init - initialize a framebuffer * @dev: DRM device * @fb: framebuffer to be initialized * @funcs: ... with these functions * * Allocates an ID for the framebuffer's parent mode object, sets its mode * functions & device file and adds it to the master fd list. * * IMPORTANT: * This functions publishes the fb and makes it available for concurrent access * by other users. Which means by this point the fb _must_ be fully set up - * since all the fb attributes are invariant over its lifetime, no further * locking but only correct reference counting is required. * * RETURNS: * Zero on success, error code on failure. 
*/ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { int ret; mutex_lock(&dev->mode_config.fb_lock); kref_init(&fb->refcount); INIT_LIST_HEAD(&fb->filp_head); fb->dev = dev; fb->funcs = funcs; ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); if (ret) goto out; /* Grab the idr reference. */ drm_framebuffer_reference(fb); dev->mode_config.num_fb++; list_add(&fb->head, &dev->mode_config.fb_list); out: mutex_unlock(&dev->mode_config.fb_lock); return 0; } EXPORT_SYMBOL(drm_framebuffer_init); static void drm_framebuffer_free(struct kref *kref) { struct drm_framebuffer *fb = container_of(kref, struct drm_framebuffer, refcount); fb->funcs->destroy(fb); } static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev, uint32_t id) { struct drm_mode_object *obj = NULL; struct drm_framebuffer *fb; mutex_lock(&dev->mode_config.idr_mutex); obj = idr_find(&dev->mode_config.crtc_idr, id); if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id)) fb = NULL; else fb = obj_to_fb(obj); mutex_unlock(&dev->mode_config.idr_mutex); return fb; } /** * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference * @dev: drm device * @id: id of the fb object * * If successful, this grabs an additional reference to the framebuffer - * callers need to make sure to eventually unreference the returned framebuffer * again. */ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, uint32_t id) { struct drm_framebuffer *fb; mutex_lock(&dev->mode_config.fb_lock); fb = __drm_framebuffer_lookup(dev, id); if (fb) drm_framebuffer_reference(fb); mutex_unlock(&dev->mode_config.fb_lock); return fb; } EXPORT_SYMBOL(drm_framebuffer_lookup); /** * drm_framebuffer_unreference - unref a framebuffer * @fb: framebuffer to unref * * This functions decrements the fb's refcount and frees it if it drops to zero. 
 */
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
	DRM_DEBUG("FB ID: %d\n", fb->base.id);
	kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);

/**
 * drm_framebuffer_reference - incr the fb refcnt
 * @fb: framebuffer
 */
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
	DRM_DEBUG("FB ID: %d\n", fb->base.id);
	kref_get(&fb->refcount);
}
EXPORT_SYMBOL(drm_framebuffer_reference);

/*
 * Release callback used where dropping the last reference would be a
 * logic error (the caller is guaranteed to hold another reference).
 */
static void drm_framebuffer_free_bug(struct kref *kref)
{
	BUG();
}

/* Drop a reference that must not be the last one (see free_bug above). */
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
	DRM_DEBUG("FB ID: %d\n", fb->base.id);
	kref_put(&fb->refcount, drm_framebuffer_free_bug);
}

/* dev->mode_config.fb_lock must be held! */
static void __drm_framebuffer_unregister(struct drm_device *dev,
					 struct drm_framebuffer *fb)
{
	/* Remove the fb from the lookup idr ... */
	mutex_lock(&dev->mode_config.idr_mutex);
	idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
	mutex_unlock(&dev->mode_config.idr_mutex);

	/* ... mark it as reaped (id 0 is never a valid object id) ... */
	fb->base.id = 0;

	/* ... and drop the reference the idr used to hold. */
	__drm_framebuffer_unreference(fb);
}

/**
 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
 * @fb: fb to unregister
 *
 * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
 * those used for fbdev. Note that the caller must hold a reference of it's own,
 * i.e. the object may not be destroyed through this call (since it'll lead to a
 * locking inversion).
 */
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;

	mutex_lock(&dev->mode_config.fb_lock);
	/* Mark fb as reaped and drop idr ref. */
	__drm_framebuffer_unregister(dev, fb);
	mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_unregister_private);

/**
 * drm_framebuffer_cleanup - remove a framebuffer object
 * @fb: framebuffer to remove
 *
 * Cleanup references to a user-created framebuffer. This function is intended
 * to be used from the drivers ->destroy callback.
* * Note that this function does not remove the fb from active usuage - if it is * still used anywhere, hilarity can ensue since userspace could call getfb on * the id and get back -EINVAL. Obviously no concern at driver unload time. * * Also, the framebuffer will not be removed from the lookup idr - for * user-created framebuffers this will happen in in the rmfb ioctl. For * driver-private objects (e.g. for fbdev) drivers need to explicitly call * drm_framebuffer_unregister_private. */ void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; mutex_lock(&dev->mode_config.fb_lock); list_del(&fb->head); dev->mode_config.num_fb--; mutex_unlock(&dev->mode_config.fb_lock); } EXPORT_SYMBOL(drm_framebuffer_cleanup); /** * drm_framebuffer_remove - remove and unreference a framebuffer object * @fb: framebuffer to remove * * Scans all the CRTCs and planes in @dev's mode_config. If they're * using @fb, removes it, setting it to NULL. Then drops the reference to the * passed-in framebuffer. Might take the modeset locks. * * Note that this function optimizes the cleanup away if the caller holds the * last reference to the framebuffer. It is also guaranteed to not take the * modeset locks in this case. */ void drm_framebuffer_remove(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; struct drm_crtc *crtc; struct drm_plane *plane; struct drm_mode_set set; int ret; WARN_ON(!list_empty(&fb->filp_head)); /* * drm ABI mandates that we remove any deleted framebuffers from active * useage. But since most sane clients only remove framebuffers they no * longer need, try to optimize this away. * * Since we're holding a reference ourselves, observing a refcount of 1 * means that we're the last holder and can skip it. Also, the refcount * can never increase from 1 again, so we don't need any barriers or * locks. * * Note that userspace could try to race with use and instate a new * usage _after_ we've cleared all current ones. 
End result will be an * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot * in this manner. */ if (atomic_read(&fb->refcount.refcount) > 1) { drm_modeset_lock_all(dev); /* remove from any CRTC */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb == fb) { /* should turn off the crtc */ memset(&set, 0, sizeof(struct drm_mode_set)); set.crtc = crtc; set.fb = NULL; ret = drm_mode_set_config_internal(&set); if (ret) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); } } list_for_each_entry(plane, &dev->mode_config.plane_list, head) { if (plane->fb == fb) { /* should turn off the crtc */ ret = plane->funcs->disable_plane(plane); if (ret) DRM_ERROR("failed to disable plane with busy fb\n"); /* disconnect the plane from the fb and crtc: */ __drm_framebuffer_unreference(plane->fb); plane->fb = NULL; plane->crtc = NULL; } } drm_modeset_unlock_all(dev); } drm_framebuffer_unreference(fb); } EXPORT_SYMBOL(drm_framebuffer_remove); /** * drm_crtc_init - Initialise a new CRTC object * @dev: DRM device * @crtc: CRTC object to init * @funcs: callbacks for the new CRTC * * Inits a new object created as base part of an driver crtc object. * * RETURNS: * Zero on success, error code on failure. */ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs) { int ret; crtc->dev = dev; crtc->funcs = funcs; crtc->invert_dimensions = false; drm_modeset_lock_all(dev); mutex_init(&crtc->mutex); mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex); ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); if (ret) goto out; crtc->base.properties = &crtc->properties; list_add_tail(&crtc->head, &dev->mode_config.crtc_list); dev->mode_config.num_crtc++; out: drm_modeset_unlock_all(dev); return ret; } EXPORT_SYMBOL(drm_crtc_init); /** * drm_crtc_cleanup - Cleans up the core crtc usage. * @crtc: CRTC to cleanup * * Cleanup @crtc. 
Removes from drm modesetting space * does NOT free object, caller does that. */ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; kfree(crtc->gamma_store); crtc->gamma_store = NULL; drm_mode_object_put(dev, &crtc->base); list_del(&crtc->head); dev->mode_config.num_crtc--; } EXPORT_SYMBOL(drm_crtc_cleanup); /** * drm_mode_probed_add - add a mode to a connector's probed mode list * @connector: connector the new mode * @mode: mode data * * Add @mode to @connector's mode list for later use. */ void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode) { list_add(&mode->head, &connector->probed_modes); } EXPORT_SYMBOL(drm_mode_probed_add); /** * drm_mode_remove - remove and free a mode * @connector: connector list to modify * @mode: mode to remove * * Remove @mode from @connector's mode list, then free it. */ void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode) { list_del(&mode->head); drm_mode_destroy(connector->dev, mode); } EXPORT_SYMBOL(drm_mode_remove); /** * drm_connector_init - Init a preallocated connector * @dev: DRM device * @connector: the connector to init * @funcs: callbacks for this connector * @connector_type: user visible type of the connector * * Initialises a preallocated connector. Connectors should be * subclassed as part of driver connector objects. * * RETURNS: * Zero on success, error code on failure. 
*/ int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type) { int ret; drm_modeset_lock_all(dev); ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); if (ret) goto out; connector->base.properties = &connector->properties; connector->dev = dev; connector->funcs = funcs; connector->connector_type = connector_type; connector->connector_type_id = ++drm_connector_enum_list[connector_type].count; /* TODO */ INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); connector->edid_blob_ptr = NULL; connector->status = connector_status_unknown; list_add_tail(&connector->head, &dev->mode_config.connector_list); dev->mode_config.num_connector++; if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) drm_object_attach_property(&connector->base, dev->mode_config.edid_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.dpms_property, 0); out: drm_modeset_unlock_all(dev); return ret; } EXPORT_SYMBOL(drm_connector_init); /** * drm_connector_cleanup - cleans up an initialised connector * @connector: connector to cleanup * * Cleans up the connector but doesn't free the object. 
*/ void drm_connector_cleanup(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct drm_display_mode *mode, *t; list_for_each_entry_safe(mode, t, &connector->probed_modes, head) drm_mode_remove(connector, mode); list_for_each_entry_safe(mode, t, &connector->modes, head) drm_mode_remove(connector, mode); drm_mode_object_put(dev, &connector->base); list_del(&connector->head); dev->mode_config.num_connector--; } EXPORT_SYMBOL(drm_connector_cleanup); void drm_connector_unplug_all(struct drm_device *dev) { struct drm_connector *connector; /* taking the mode config mutex ends up in a clash with sysfs */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) drm_sysfs_connector_remove(connector); } EXPORT_SYMBOL(drm_connector_unplug_all); int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type) { int ret; drm_modeset_lock_all(dev); ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); if (ret) goto out; encoder->dev = dev; encoder->encoder_type = encoder_type; encoder->funcs = funcs; list_add_tail(&encoder->head, &dev->mode_config.encoder_list); dev->mode_config.num_encoder++; out: drm_modeset_unlock_all(dev); return ret; } EXPORT_SYMBOL(drm_encoder_init); void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; drm_modeset_lock_all(dev); drm_mode_object_put(dev, &encoder->base); list_del(&encoder->head); dev->mode_config.num_encoder--; drm_modeset_unlock_all(dev); } EXPORT_SYMBOL(drm_encoder_cleanup); int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, uint32_t format_count, bool priv) { int ret; drm_modeset_lock_all(dev); ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); if (ret) goto out; plane->base.properties = &plane->properties; plane->dev = dev; plane->funcs = funcs; 
plane->format_types = kmalloc(sizeof(uint32_t) * format_count, GFP_KERNEL); if (!plane->format_types) { DRM_DEBUG_KMS("out of memory when allocating plane\n"); drm_mode_object_put(dev, &plane->base); ret = -ENOMEM; goto out; } memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); plane->format_count = format_count; plane->possible_crtcs = possible_crtcs; /* private planes are not exposed to userspace, but depending on * display hardware, might be convenient to allow sharing programming * for the scanout engine with the crtc implementation. */ if (!priv) { list_add_tail(&plane->head, &dev->mode_config.plane_list); dev->mode_config.num_plane++; } else { INIT_LIST_HEAD(&plane->head); } out: drm_modeset_unlock_all(dev); return ret; } EXPORT_SYMBOL(drm_plane_init); void drm_plane_cleanup(struct drm_plane *plane) { struct drm_device *dev = plane->dev; drm_modeset_lock_all(dev); kfree(plane->format_types); drm_mode_object_put(dev, &plane->base); /* if not added to a list, it must be a private plane */ if (!list_empty(&plane->head)) { list_del(&plane->head); dev->mode_config.num_plane--; } drm_modeset_unlock_all(dev); } EXPORT_SYMBOL(drm_plane_cleanup); /** * drm_mode_create - create a new display mode * @dev: DRM device * * Create a new drm_display_mode, give it an ID, and return it. * * RETURNS: * Pointer to new mode on success, NULL on error. */ struct drm_display_mode *drm_mode_create(struct drm_device *dev) { struct drm_display_mode *nmode; nmode = kzalloc(sizeof(struct drm_display_mode), GFP_KERNEL); if (!nmode) return NULL; if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) { kfree(nmode); return NULL; } return nmode; } EXPORT_SYMBOL(drm_mode_create); /** * drm_mode_destroy - remove a mode * @dev: DRM device * @mode: mode to remove * * Free @mode's unique identifier, then free it. 
*/ void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) { if (!mode) return; drm_mode_object_put(dev, &mode->base); kfree(mode); } EXPORT_SYMBOL(drm_mode_destroy); static int drm_mode_create_standard_connector_properties(struct drm_device *dev) { struct drm_property *edid; struct drm_property *dpms; /* * Standard properties (apply to all connectors) */ edid = drm_property_create(dev, DRM_MODE_PROP_BLOB | DRM_MODE_PROP_IMMUTABLE, "EDID", 0); dev->mode_config.edid_property = edid; dpms = drm_property_create_enum(dev, 0, "DPMS", drm_dpms_enum_list, ARRAY_SIZE(drm_dpms_enum_list)); dev->mode_config.dpms_property = dpms; return 0; } /** * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties * @dev: DRM device * * Called by a driver the first time a DVI-I connector is made. */ int drm_mode_create_dvi_i_properties(struct drm_device *dev) { struct drm_property *dvi_i_selector; struct drm_property *dvi_i_subconnector; if (dev->mode_config.dvi_i_select_subconnector_property) return 0; dvi_i_selector = drm_property_create_enum(dev, 0, "select subconnector", drm_dvi_i_select_enum_list, ARRAY_SIZE(drm_dvi_i_select_enum_list)); dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "subconnector", drm_dvi_i_subconnector_enum_list, ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; return 0; } EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); /** * drm_create_tv_properties - create TV specific connector properties * @dev: DRM device * @num_modes: number of different TV formats (modes) supported * @modes: array of pointers to strings containing name of each format * * Called by a driver's TV initialization routine, this function creates * the TV specific connector properties for a given device. 
Caller is * responsible for allocating a list of format names and passing them to * this routine. */ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, char *modes[]) { struct drm_property *tv_selector; struct drm_property *tv_subconnector; int i; if (dev->mode_config.tv_select_subconnector_property) return 0; /* * Basic connector properties */ tv_selector = drm_property_create_enum(dev, 0, "select subconnector", drm_tv_select_enum_list, ARRAY_SIZE(drm_tv_select_enum_list)); dev->mode_config.tv_select_subconnector_property = tv_selector; tv_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "subconnector", drm_tv_subconnector_enum_list, ARRAY_SIZE(drm_tv_subconnector_enum_list)); dev->mode_config.tv_subconnector_property = tv_subconnector; /* * Other, TV specific properties: margins & TV modes. */ dev->mode_config.tv_left_margin_property = drm_property_create_range(dev, 0, "left margin", 0, 100); dev->mode_config.tv_right_margin_property = drm_property_create_range(dev, 0, "right margin", 0, 100); dev->mode_config.tv_top_margin_property = drm_property_create_range(dev, 0, "top margin", 0, 100); dev->mode_config.tv_bottom_margin_property = drm_property_create_range(dev, 0, "bottom margin", 0, 100); dev->mode_config.tv_mode_property = drm_property_create(dev, DRM_MODE_PROP_ENUM, "mode", num_modes); for (i = 0; i < num_modes; i++) drm_property_add_enum(dev->mode_config.tv_mode_property, i, i, modes[i]); dev->mode_config.tv_brightness_property = drm_property_create_range(dev, 0, "brightness", 0, 100); dev->mode_config.tv_contrast_property = drm_property_create_range(dev, 0, "contrast", 0, 100); dev->mode_config.tv_flicker_reduction_property = drm_property_create_range(dev, 0, "flicker reduction", 0, 100); dev->mode_config.tv_overscan_property = drm_property_create_range(dev, 0, "overscan", 0, 100); dev->mode_config.tv_saturation_property = drm_property_create_range(dev, 0, "saturation", 0, 100); dev->mode_config.tv_hue_property 
= drm_property_create_range(dev, 0, "hue", 0, 100); return 0; } EXPORT_SYMBOL(drm_mode_create_tv_properties); /** * drm_mode_create_scaling_mode_property - create scaling mode property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_scaling_mode_property(struct drm_device *dev) { struct drm_property *scaling_mode; if (dev->mode_config.scaling_mode_property) return 0; scaling_mode = drm_property_create_enum(dev, 0, "scaling mode", drm_scaling_mode_enum_list, ARRAY_SIZE(drm_scaling_mode_enum_list)); dev->mode_config.scaling_mode_property = scaling_mode; return 0; } EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); /** * drm_mode_create_dithering_property - create dithering property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_dithering_property(struct drm_device *dev) { struct drm_property *dithering_mode; if (dev->mode_config.dithering_mode_property) return 0; dithering_mode = drm_property_create_enum(dev, 0, "dithering", drm_dithering_mode_enum_list, ARRAY_SIZE(drm_dithering_mode_enum_list)); dev->mode_config.dithering_mode_property = dithering_mode; return 0; } EXPORT_SYMBOL(drm_mode_create_dithering_property); /** * drm_mode_create_dirty_property - create dirty property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. 
*/ int drm_mode_create_dirty_info_property(struct drm_device *dev) { struct drm_property *dirty_info; if (dev->mode_config.dirty_info_property) return 0; dirty_info = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "dirty", drm_dirty_info_enum_list, ARRAY_SIZE(drm_dirty_info_enum_list)); dev->mode_config.dirty_info_property = dirty_info; return 0; } EXPORT_SYMBOL(drm_mode_create_dirty_info_property); static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group) { uint32_t total_objects = 0; total_objects += dev->mode_config.num_crtc; total_objects += dev->mode_config.num_connector; total_objects += dev->mode_config.num_encoder; group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL); if (!group->id_list) return -ENOMEM; group->num_crtcs = 0; group->num_connectors = 0; group->num_encoders = 0; return 0; } int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group) { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; int ret; if ((ret = drm_mode_group_init(dev, group))) return ret; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) group->id_list[group->num_crtcs++] = crtc->base.id; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) group->id_list[group->num_crtcs + group->num_encoders++] = encoder->base.id; list_for_each_entry(connector, &dev->mode_config.connector_list, head) group->id_list[group->num_crtcs + group->num_encoders + group->num_connectors++] = connector->base.id; return 0; } EXPORT_SYMBOL(drm_mode_group_init_legacy_group); /** * drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo * @out: drm_mode_modeinfo struct to return to the user * @in: drm_display_mode to use * * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to * the user. 
*/ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out, const struct drm_display_mode *in) { WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX || in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX || in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX || in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX || in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX, "timing values too large for mode info\n"); out->clock = in->clock; out->hdisplay = in->hdisplay; out->hsync_start = in->hsync_start; out->hsync_end = in->hsync_end; out->htotal = in->htotal; out->hskew = in->hskew; out->vdisplay = in->vdisplay; out->vsync_start = in->vsync_start; out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; out->vrefresh = in->vrefresh; out->flags = in->flags; out->type = in->type; strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; } /** * drm_crtc_convert_to_umode - convert a modeinfo into a drm_display_mode * @out: drm_display_mode to return to the user * @in: drm_mode_modeinfo to use * * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to * the caller. * * RETURNS: * Zero on success, errno on failure. 
*/ static int drm_crtc_convert_umode(struct drm_display_mode *out, const struct drm_mode_modeinfo *in) { if (in->clock > INT_MAX || in->vrefresh > INT_MAX) return -ERANGE; out->clock = in->clock; out->hdisplay = in->hdisplay; out->hsync_start = in->hsync_start; out->hsync_end = in->hsync_end; out->htotal = in->htotal; out->hskew = in->hskew; out->vdisplay = in->vdisplay; out->vsync_start = in->vsync_start; out->vsync_end = in->vsync_end; out->vtotal = in->vtotal; out->vscan = in->vscan; out->vrefresh = in->vrefresh; out->flags = in->flags; out->type = in->type; strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); out->name[DRM_DISPLAY_MODE_LEN-1] = 0; return 0; } /** * drm_mode_getresources - get graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a set of configuration description structures and return * them to the user, including CRTC, connector and framebuffer configuration. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_card_res *card_res = data; struct list_head *lh; struct drm_framebuffer *fb; struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *encoder; int ret = 0; int connector_count = 0; int crtc_count = 0; int fb_count = 0; int encoder_count = 0; int copied = 0, i; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; uint32_t __user *encoder_id; struct drm_mode_group *mode_group; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&file_priv->fbs_lock); /* * For the non-control nodes we need to limit the list of resources * by IDs in the group list for this node */ list_for_each(lh, &file_priv->fbs) fb_count++; /* handle this in 4 parts */ /* FBs */ if (card_res->count_fbs >= fb_count) { copied = 0; fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; list_for_each_entry(fb, &file_priv->fbs, filp_head) { if (put_user(fb->base.id, fb_id + copied)) { mutex_unlock(&file_priv->fbs_lock); return -EFAULT; } copied++; } } card_res->count_fbs = fb_count; mutex_unlock(&file_priv->fbs_lock); drm_modeset_lock_all(dev); mode_group = &file_priv->master->minor->mode_group; if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each(lh, &dev->mode_config.crtc_list) crtc_count++; list_for_each(lh, &dev->mode_config.connector_list) connector_count++; list_for_each(lh, &dev->mode_config.encoder_list) encoder_count++; } else { crtc_count = mode_group->num_crtcs; connector_count = mode_group->num_connectors; encoder_count = mode_group->num_encoders; } card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; /* CRTCs */ if (card_res->count_crtcs >= crtc_count) { copied = 0; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; if 
(file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (put_user(crtc->base.id, crtc_id + copied)) { ret = -EFAULT; goto out; } copied++; } } else { for (i = 0; i < mode_group->num_crtcs; i++) { if (put_user(mode_group->id_list[i], crtc_id + copied)) { ret = -EFAULT; goto out; } copied++; } } } card_res->count_crtcs = crtc_count; /* Encoders */ if (card_res->count_encoders >= encoder_count) { copied = 0; encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, drm_get_encoder_name(encoder)); if (put_user(encoder->base.id, encoder_id + copied)) { ret = -EFAULT; goto out; } copied++; } } else { for (i = mode_group->num_crtcs; i < mode_group->num_crtcs + mode_group->num_encoders; i++) { if (put_user(mode_group->id_list[i], encoder_id + copied)) { ret = -EFAULT; goto out; } copied++; } } } card_res->count_encoders = encoder_count; /* Connectors */ if (card_res->count_connectors >= connector_count) { copied = 0; connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each_entry(connector, &dev->mode_config.connector_list, head) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); if (put_user(connector->base.id, connector_id + copied)) { ret = -EFAULT; goto out; } copied++; } } else { int start = mode_group->num_crtcs + mode_group->num_encoders; for (i = start; i < start + mode_group->num_connectors; i++) { if (put_user(mode_group->id_list[i], connector_id + copied)) { ret = -EFAULT; goto out; } copied++; } } } card_res->count_connectors = connector_count; DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs, 
card_res->count_connectors, card_res->count_encoders); out: drm_modeset_unlock_all(dev); return ret; } /** * drm_mode_getcrtc - get CRTC configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a CRTC configuration structure to return to the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_getcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc *crtc_resp = data; struct drm_crtc *crtc; struct drm_mode_object *obj; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; drm_modeset_lock_all(dev); obj = drm_mode_object_find(dev, crtc_resp->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); crtc_resp->x = crtc->x; crtc_resp->y = crtc->y; crtc_resp->gamma_size = crtc->gamma_size; if (crtc->fb) crtc_resp->fb_id = crtc->fb->base.id; else crtc_resp->fb_id = 0; if (crtc->enabled) { drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); crtc_resp->mode_valid = 1; } else { crtc_resp->mode_valid = 0; } out: drm_modeset_unlock_all(dev); return ret; } /** * drm_mode_getconnector - get connector configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a connector configuration structure to return to the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
 */
/*
 * Report a connector's current modes, attached properties and encoders to
 * userspace.  Uses the two-pass ioctl convention: userspace first calls with
 * zero counts to learn the sizes, then again with buffers large enough.
 */
int drm_mode_getconnector(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_mode_get_connector *out_resp = data;
	struct drm_mode_object *obj;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	int mode_count = 0;
	int props_count = 0;
	int encoders_count = 0;
	int ret = 0;
	int copied = 0;
	int i;
	struct drm_mode_modeinfo u_mode;
	struct drm_mode_modeinfo __user *mode_ptr;
	uint32_t __user *prop_ptr;
	uint64_t __user *prop_values;
	uint32_t __user *encoder_ptr;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));

	DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);

	mutex_lock(&dev->mode_config.mutex);

	obj = drm_mode_object_find(dev, out_resp->connector_id,
				   DRM_MODE_OBJECT_CONNECTOR);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}
	connector = obj_to_connector(obj);

	props_count = connector->properties.count;

	/* Count the encoder slots actually in use for this connector. */
	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] != 0) {
			encoders_count++;
		}
	}

	/* A zero mode count is the userspace request to (re)probe modes. */
	if (out_resp->count_modes == 0) {
		connector->funcs->fill_modes(connector,
					     dev->mode_config.max_width,
					     dev->mode_config.max_height);
	}

	/* delayed so we get modes regardless of pre-fill_modes state */
	list_for_each_entry(mode, &connector->modes, head)
		mode_count++;

	out_resp->connector_id = connector->base.id;
	out_resp->connector_type = connector->connector_type;
	out_resp->connector_type_id = connector->connector_type_id;
	out_resp->mm_width = connector->display_info.width_mm;
	out_resp->mm_height = connector->display_info.height_mm;
	out_resp->subpixel = connector->display_info.subpixel_order;
	out_resp->connection = connector->status;
	if (connector->encoder)
		out_resp->encoder_id = connector->encoder->base.id;
	else
		out_resp->encoder_id = 0;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if ((out_resp->count_modes >= mode_count) && mode_count) {
		copied = 0;
		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
		list_for_each_entry(mode, &connector->modes, head) {
			drm_crtc_convert_to_umode(&u_mode, mode);
			if (copy_to_user(mode_ptr + copied,
					 &u_mode, sizeof(u_mode))) {
				ret = -EFAULT;
				goto out;
			}
			copied++;
		}
	}
	out_resp->count_modes = mode_count;

	if ((out_resp->count_props >= props_count) && props_count) {
		copied = 0;
		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
		for (i = 0; i < connector->properties.count; i++) {
			if (put_user(connector->properties.ids[i],
				     prop_ptr + copied)) {
				ret = -EFAULT;
				goto out;
			}

			if (put_user(connector->properties.values[i],
				     prop_values + copied)) {
				ret = -EFAULT;
				goto out;
			}
			copied++;
		}
	}
	out_resp->count_props = props_count;

	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
		copied = 0;
		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
			if (connector->encoder_ids[i] != 0) {
				if (put_user(connector->encoder_ids[i],
					     encoder_ptr + copied)) {
					ret = -EFAULT;
					goto out;
				}
				copied++;
			}
		}
	}
	out_resp->count_encoders = encoders_count;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

/* Report an encoder's state: current crtc, type, possible crtcs/clones. */
int drm_mode_getencoder(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_mode_get_encoder *enc_resp = data;
	struct drm_mode_object *obj;
	struct drm_encoder *encoder;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	obj = drm_mode_object_find(dev, enc_resp->encoder_id,
				   DRM_MODE_OBJECT_ENCODER);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}
	encoder = obj_to_encoder(obj);

	if (encoder->crtc)
		enc_resp->crtc_id = encoder->crtc->base.id;
	else
		enc_resp->crtc_id = 0;
	enc_resp->encoder_type = encoder->encoder_type;
	enc_resp->encoder_id = encoder->base.id;
	enc_resp->possible_crtcs = encoder->possible_crtcs;
	enc_resp->possible_clones = encoder->possible_clones;

out:
	drm_modeset_unlock_all(dev);
	return ret;
}

/**
 * drm_mode_getplane_res - get plane info
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Return an plane count and set of IDs.
 */
int drm_mode_getplane_res(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_mode_get_plane_res *plane_resp = data;
	struct drm_mode_config *config;
	struct drm_plane *plane;
	uint32_t __user *plane_ptr;
	int copied = 0, ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	config = &dev->mode_config;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (config->num_plane &&
	    (plane_resp->count_planes >= config->num_plane)) {
		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;

		list_for_each_entry(plane, &config->plane_list, head) {
			if (put_user(plane->base.id, plane_ptr + copied)) {
				ret = -EFAULT;
				goto out;
			}
			copied++;
		}
	}
	plane_resp->count_planes = config->num_plane;

out:
	drm_modeset_unlock_all(dev);
	return ret;
}

/**
 * drm_mode_getplane - get plane info
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Return plane info, including formats supported, gamma size, any
 * current fb, etc.
 */
int drm_mode_getplane(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_mode_get_plane *plane_resp = data;
	struct drm_mode_object *obj;
	struct drm_plane *plane;
	uint32_t __user *format_ptr;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	obj = drm_mode_object_find(dev, plane_resp->plane_id,
				   DRM_MODE_OBJECT_PLANE);
	if (!obj) {
		ret = -ENOENT;
		goto out;
	}
	plane = obj_to_plane(obj);

	if (plane->crtc)
		plane_resp->crtc_id = plane->crtc->base.id;
	else
		plane_resp->crtc_id = 0;

	if (plane->fb)
		plane_resp->fb_id = plane->fb->base.id;
	else
		plane_resp->fb_id = 0;

	plane_resp->plane_id = plane->base.id;
	plane_resp->possible_crtcs = plane->possible_crtcs;
	plane_resp->gamma_size = plane->gamma_size;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (plane->format_count &&
	    (plane_resp->count_format_types >= plane->format_count)) {
		format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
		if (copy_to_user(format_ptr,
				 plane->format_types,
				 sizeof(uint32_t) * plane->format_count)) {
			ret = -EFAULT;
			goto out;
		}
	}
	plane_resp->count_format_types = plane->format_count;

out:
	drm_modeset_unlock_all(dev);
	return ret;
}

/**
 * drm_mode_setplane - set up or tear down a plane
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Set plane info, including placement, fb, scaling, and other factors.
 * Or pass a NULL fb to disable.
 */
int drm_mode_setplane(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_mode_set_plane *plane_req = data;
	struct drm_mode_object *obj;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_framebuffer *fb = NULL, *old_fb = NULL;
	int ret = 0;
	unsigned int fb_width, fb_height;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/*
	 * First, find the plane, crtc, and fb objects.  If not available,
	 * we don't bother to call the driver.
	 */
	obj = drm_mode_object_find(dev, plane_req->plane_id,
				   DRM_MODE_OBJECT_PLANE);
	if (!obj) {
		DRM_DEBUG_KMS("Unknown plane ID %d\n",
			      plane_req->plane_id);
		return -ENOENT;
	}
	plane = obj_to_plane(obj);

	/* No fb means shut it down */
	if (!plane_req->fb_id) {
		drm_modeset_lock_all(dev);
		old_fb = plane->fb;
		plane->funcs->disable_plane(plane);
		plane->crtc = NULL;
		plane->fb = NULL;
		drm_modeset_unlock_all(dev);
		goto out;
	}

	obj = drm_mode_object_find(dev, plane_req->crtc_id,
				   DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
			      plane_req->crtc_id);
		ret = -ENOENT;
		goto out;
	}
	crtc = obj_to_crtc(obj);

	/* drm_framebuffer_lookup takes a reference; dropped at "out". */
	fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
	if (!fb) {
		DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
			      plane_req->fb_id);
		ret = -ENOENT;
		goto out;
	}

	/* Check whether this plane supports the fb pixel format. */
	for (i = 0; i < plane->format_count; i++)
		if (fb->pixel_format == plane->format_types[i])
			break;
	if (i == plane->format_count) {
		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
		ret = -EINVAL;
		goto out;
	}

	/* Source coordinates are 16.16 fixed point; scale fb size to match. */
	fb_width = fb->width << 16;
	fb_height = fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (plane_req->src_w > fb_width ||
	    plane_req->src_x > fb_width - plane_req->src_w ||
	    plane_req->src_h > fb_height ||
	    plane_req->src_y > fb_height - plane_req->src_h) {
		DRM_DEBUG_KMS("Invalid source coordinates "
			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
			      plane_req->src_w >> 16,
			      ((plane_req->src_w & 0xffff) * 15625) >> 10,
			      plane_req->src_h >> 16,
			      ((plane_req->src_h & 0xffff) * 15625) >> 10,
			      plane_req->src_x >> 16,
			      ((plane_req->src_x & 0xffff) * 15625) >> 10,
			      plane_req->src_y >> 16,
			      ((plane_req->src_y & 0xffff) * 15625) >> 10);
		ret = -ENOSPC;
		goto out;
	}

	/* Give drivers some help against integer overflows */
	if (plane_req->crtc_w > INT_MAX ||
	    plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
	    plane_req->crtc_h > INT_MAX ||
	    plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
			      plane_req->crtc_w, plane_req->crtc_h,
			      plane_req->crtc_x, plane_req->crtc_y);
		ret = -ERANGE;
		goto out;
	}

	drm_modeset_lock_all(dev);
	ret = plane->funcs->update_plane(plane, crtc, fb,
					 plane_req->crtc_x, plane_req->crtc_y,
					 plane_req->crtc_w, plane_req->crtc_h,
					 plane_req->src_x, plane_req->src_y,
					 plane_req->src_w, plane_req->src_h);
	if (!ret) {
		/* Transfer our lookup reference to plane->fb. */
		old_fb = plane->fb;
		plane->crtc = crtc;
		plane->fb = fb;
		fb = NULL;
	}
	drm_modeset_unlock_all(dev);

out:
	if (fb)
		drm_framebuffer_unreference(fb);
	if (old_fb)
		drm_framebuffer_unreference(old_fb);

	return ret;
}

/**
 * drm_mode_set_config_internal - helper to call ->set_config
 * @set: modeset config to set
 *
 * This is a little helper to wrap internal calls to the ->set_config driver
 * interface. The only thing it adds is correct refcounting dance.
 */
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
	struct drm_crtc *crtc = set->crtc;
	struct drm_framebuffer *fb, *old_fb;
	int ret;

	old_fb = crtc->fb;
	fb = set->fb;

	ret = crtc->funcs->set_config(set);
	if (ret == 0) {
		/* On success the crtc holds the new fb: drop the ref on the
		 * old one and take a ref on the new. */
		if (old_fb)
			drm_framebuffer_unreference(old_fb);
		if (fb)
			drm_framebuffer_reference(fb);
	}

	return ret;
}
EXPORT_SYMBOL(drm_mode_set_config_internal);

/**
 * drm_mode_setcrtc - set CRTC configuration
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Build a new CRTC configuration based on user request.
 *
 * Called by the user via ioctl.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int drm_mode_setcrtc(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_mode_crtc *crtc_req = data;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct drm_connector **connector_set = NULL, *connector;
	struct drm_framebuffer *fb = NULL;
	struct drm_display_mode *mode = NULL;
	struct drm_mode_set set;
	uint32_t __user *set_connectors_ptr;
	int ret;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* For some reason crtc x/y offsets are signed internally. */
	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
		return -ERANGE;

	drm_modeset_lock_all(dev);
	obj = drm_mode_object_find(dev, crtc_req->crtc_id,
				   DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id);
		ret = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);

	if (crtc_req->mode_valid) {
		int hdisplay, vdisplay;
		/* If we have a mode we need a framebuffer. */
		/* If we pass -1, set the mode with the currently bound fb */
		if (crtc_req->fb_id == -1) {
			if (!crtc->fb) {
				DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
				ret = -EINVAL;
				goto out;
			}
			fb = crtc->fb;
			/* Make refcounting symmetric with the lookup path. */
			drm_framebuffer_reference(fb);
		} else {
			fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
			if (!fb) {
				DRM_DEBUG_KMS("Unknown FB ID%d\n",
					      crtc_req->fb_id);
				ret = -EINVAL;
				goto out;
			}
		}

		mode = drm_mode_create(dev);
		if (!mode) {
			ret = -ENOMEM;
			goto out;
		}

		ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
		if (ret) {
			DRM_DEBUG_KMS("Invalid mode\n");
			goto out;
		}

		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);

		hdisplay = mode->hdisplay;
		vdisplay = mode->vdisplay;

		if (crtc->invert_dimensions)
			swap(hdisplay, vdisplay);

		/* Viewport must lie entirely within the framebuffer. */
		if (hdisplay > fb->width ||
		    vdisplay > fb->height ||
		    crtc_req->x > fb->width - hdisplay ||
		    crtc_req->y > fb->height - vdisplay) {
			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
				      fb->width, fb->height,
				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
				      crtc->invert_dimensions ? " (inverted)" : "");
			ret = -ENOSPC;
			goto out;
		}
	}

	if (crtc_req->count_connectors == 0 && mode) {
		DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
		ret = -EINVAL;
		goto out;
	}

	if (crtc_req->count_connectors > 0 && (!mode || !fb)) {
		DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n",
			      crtc_req->count_connectors);
		ret = -EINVAL;
		goto out;
	}

	if (crtc_req->count_connectors > 0) {
		u32 out_id;

		/* Avoid unbounded kernel memory allocation */
		if (crtc_req->count_connectors > config->num_connector) {
			ret = -EINVAL;
			goto out;
		}

		connector_set = kmalloc(crtc_req->count_connectors *
					sizeof(struct drm_connector *),
					GFP_KERNEL);
		if (!connector_set) {
			ret = -ENOMEM;
			goto out;
		}

		for (i = 0; i < crtc_req->count_connectors; i++) {
			set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
			if (get_user(out_id, &set_connectors_ptr[i])) {
				ret = -EFAULT;
				goto out;
			}

			obj = drm_mode_object_find(dev, out_id,
						   DRM_MODE_OBJECT_CONNECTOR);
			if (!obj) {
				DRM_DEBUG_KMS("Connector id %d unknown\n",
					      out_id);
				ret = -EINVAL;
				goto out;
			}
			connector = obj_to_connector(obj);
			DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
				      connector->base.id,
				      drm_get_connector_name(connector));

			connector_set[i] = connector;
		}
	}

	set.crtc = crtc;
	set.x = crtc_req->x;
	set.y = crtc_req->y;
	set.mode = mode;
	set.connectors = connector_set;
	set.num_connectors = crtc_req->count_connectors;
	set.fb = fb;
	ret = drm_mode_set_config_internal(&set);

out:
	if (fb)
		drm_framebuffer_unreference(fb);

	kfree(connector_set);
	drm_mode_destroy(dev, mode);
	drm_modeset_unlock_all(dev);
	return ret;
}

/* Set or move the hardware cursor on a CRTC, per req->flags. */
int drm_mode_cursor_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_cursor *req = data;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	/* Reject empty or unknown flag bits. */
	if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
		return -EINVAL;

	obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);

	mutex_lock(&crtc->mutex);
	if (req->flags & DRM_MODE_CURSOR_BO) {
		if (!crtc->funcs->cursor_set) {
			ret = -ENXIO;
			goto out;
		}
		/* Turns off the cursor if handle is 0 */
		ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle,
					      req->width, req->height);
	}

	if (req->flags & DRM_MODE_CURSOR_MOVE) {
		if (crtc->funcs->cursor_move) {
			ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
		} else {
			ret = -EFAULT;
			goto out;
		}
	}
out:
	mutex_unlock(&crtc->mutex);

	return ret;
}

/* Original addfb only supported RGB formats, so figure out which one */
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
	uint32_t fmt;

	switch (bpp) {
	case 8:
		fmt = DRM_FORMAT_C8;
		break;
	case 16:
		if (depth == 15)
			fmt = DRM_FORMAT_XRGB1555;
		else
			fmt = DRM_FORMAT_RGB565;
		break;
	case 24:
		fmt = DRM_FORMAT_RGB888;
		break;
	case 32:
		if (depth == 24)
			fmt = DRM_FORMAT_XRGB8888;
		else if (depth == 30)
			fmt = DRM_FORMAT_XRGB2101010;
		else
			fmt = DRM_FORMAT_ARGB8888;
		break;
	default:
		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
		fmt = DRM_FORMAT_XRGB8888;
		break;
	}

	return fmt;
}
EXPORT_SYMBOL(drm_mode_legacy_fb_format); /** * drm_mode_addfb - add an FB to the graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Add a new FB to the specified CRTC, given a user request. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_addfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *or = data; struct drm_mode_fb_cmd2 r = {}; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret = 0; /* Use new struct with format internally */ r.fb_id = or->fb_id; r.width = or->width; r.height = or->height; r.pitches[0] = or->pitch; r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); r.handles[0] = or->handle; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; if ((config->min_width > r.width) || (r.width > config->max_width)) return -EINVAL; if ((config->min_height > r.height) || (r.height > config->max_height)) return -EINVAL; fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); return PTR_ERR(fb); } mutex_lock(&file_priv->fbs_lock); or->fb_id = fb->base.id; list_add(&fb->filp_head, &file_priv->fbs); DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); mutex_unlock(&file_priv->fbs_lock); return ret; } static int format_check(const struct drm_mode_fb_cmd2 *r) { uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN; switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB332: case DRM_FORMAT_BGR233: case DRM_FORMAT_XRGB4444: case DRM_FORMAT_XBGR4444: case DRM_FORMAT_RGBX4444: case DRM_FORMAT_BGRX4444: case DRM_FORMAT_ARGB4444: case DRM_FORMAT_ABGR4444: case DRM_FORMAT_RGBA4444: case DRM_FORMAT_BGRA4444: case DRM_FORMAT_XRGB1555: case DRM_FORMAT_XBGR1555: case DRM_FORMAT_RGBX5551: case DRM_FORMAT_BGRX5551: case DRM_FORMAT_ARGB1555: case DRM_FORMAT_ABGR1555: case 
DRM_FORMAT_RGBA5551: case DRM_FORMAT_BGRA5551: case DRM_FORMAT_RGB565: case DRM_FORMAT_BGR565: case DRM_FORMAT_RGB888: case DRM_FORMAT_BGR888: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_RGBX8888: case DRM_FORMAT_BGRX8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_RGBA8888: case DRM_FORMAT_BGRA8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_RGBA1010102: case DRM_FORMAT_BGRA1010102: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_AYUV: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_NV24: case DRM_FORMAT_NV42: case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: case DRM_FORMAT_YUV411: case DRM_FORMAT_YVU411: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case DRM_FORMAT_YVU444: return 0; default: return -EINVAL; } } static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) { int ret, hsub, vsub, num_planes, i; ret = format_check(r); if (ret) { DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format); return ret; } hsub = drm_format_horz_chroma_subsampling(r->pixel_format); vsub = drm_format_vert_chroma_subsampling(r->pixel_format); num_planes = drm_format_num_planes(r->pixel_format); if (r->width == 0 || r->width % hsub) { DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height); return -EINVAL; } if (r->height == 0 || r->height % vsub) { DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height); return -EINVAL; } for (i = 0; i < num_planes; i++) { unsigned int width = r->width / (i != 0 ? hsub : 1); unsigned int height = r->height / (i != 0 ? 
vsub : 1); unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i); if (!r->handles[i]) { DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); return -EINVAL; } if ((uint64_t) width * cpp > UINT_MAX) return -ERANGE; if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX) return -ERANGE; if (r->pitches[i] < width * cpp) { DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); return -EINVAL; } } return 0; } /** * drm_mode_addfb2 - add an FB to the graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Add a new FB to the specified CRTC, given a user request with format. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd2 *r = data; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; if (r->flags & ~DRM_MODE_FB_INTERLACED) { DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); return -EINVAL; } if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", r->width, config->min_width, config->max_width); return -EINVAL; } if ((config->min_height > r->height) || (r->height > config->max_height)) { DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", r->height, config->min_height, config->max_height); return -EINVAL; } ret = framebuffer_check(r); if (ret) return ret; fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); return PTR_ERR(fb); } mutex_lock(&file_priv->fbs_lock); r->fb_id = fb->base.id; list_add(&fb->filp_head, &file_priv->fbs); DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); mutex_unlock(&file_priv->fbs_lock); return ret; } /** * 
drm_mode_rmfb - remove an FB from the configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Remove the FB specified by the user. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. */ int drm_mode_rmfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_framebuffer *fb = NULL; struct drm_framebuffer *fbl = NULL; uint32_t *id = data; int found = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&file_priv->fbs_lock); mutex_lock(&dev->mode_config.fb_lock); fb = __drm_framebuffer_lookup(dev, *id); if (!fb) goto fail_lookup; list_for_each_entry(fbl, &file_priv->fbs, filp_head) if (fb == fbl) found = 1; if (!found) goto fail_lookup; /* Mark fb as reaped, we still have a ref from fpriv->fbs. */ __drm_framebuffer_unregister(dev, fb); list_del_init(&fb->filp_head); mutex_unlock(&dev->mode_config.fb_lock); mutex_unlock(&file_priv->fbs_lock); drm_framebuffer_remove(fb); return 0; fail_lookup: mutex_unlock(&dev->mode_config.fb_lock); mutex_unlock(&file_priv->fbs_lock); return -EINVAL; } /** * drm_mode_getfb - get FB info * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Lookup the FB given its ID and return info about it. * * Called by the user via ioctl. * * RETURNS: * Zero on success, errno on failure. 
*/ int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *r = data; struct drm_framebuffer *fb; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; fb = drm_framebuffer_lookup(dev, r->fb_id); if (!fb) return -EINVAL; r->height = fb->height; r->width = fb->width; r->depth = fb->depth; r->bpp = fb->bits_per_pixel; r->pitch = fb->pitches[0]; if (fb->funcs->create_handle) ret = fb->funcs->create_handle(fb, file_priv, &r->handle); else ret = -ENODEV; drm_framebuffer_unreference(fb); return ret; } int drm_mode_dirtyfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_clip_rect __user *clips_ptr; struct drm_clip_rect *clips = NULL; struct drm_mode_fb_dirty_cmd *r = data; struct drm_framebuffer *fb; unsigned flags; int num_clips; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; fb = drm_framebuffer_lookup(dev, r->fb_id); if (!fb) return -EINVAL; num_clips = r->num_clips; clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; if (!num_clips != !clips_ptr) { ret = -EINVAL; goto out_err1; } flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; /* If userspace annotates copy, clips must come in pairs */ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { ret = -EINVAL; goto out_err1; } if (num_clips && clips_ptr) { if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { ret = -EINVAL; goto out_err1; } clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); if (!clips) { ret = -ENOMEM; goto out_err1; } ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); if (ret) { ret = -EFAULT; goto out_err2; } } if (fb->funcs->dirty) { drm_modeset_lock_all(dev); ret = fb->funcs->dirty(fb, file_priv, flags, r->color, clips, num_clips); drm_modeset_unlock_all(dev); } else { ret = -ENOSYS; } out_err2: kfree(clips); out_err1: drm_framebuffer_unreference(fb); return ret; } /** * drm_fb_release - remove and free the FBs 
 * on this file
 * @priv: drm file whose framebuffers are being released
 *
 * Destroy all the FBs associated with @filp.
 *
 * NOTE(review): despite the original comment claiming "Called by the user
 * via ioctl", this is a void file-release callback, not an ioctl handler.
 */
void drm_fb_release(struct drm_file *priv)
{
	struct drm_device *dev = priv->minor->dev;
	struct drm_framebuffer *fb, *tfb;

	mutex_lock(&priv->fbs_lock);
	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {

		mutex_lock(&dev->mode_config.fb_lock);
		/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
		__drm_framebuffer_unregister(dev, fb);
		mutex_unlock(&dev->mode_config.fb_lock);

		list_del_init(&fb->filp_head);

		/* This will also drop the fpriv->fbs reference. */
		drm_framebuffer_remove(fb);
	}
	mutex_unlock(&priv->fbs_lock);
}

/*
 * Allocate and register a mode property with room for @num_values values.
 * Returns NULL on allocation or object-registration failure.
 */
struct drm_property *drm_property_create(struct drm_device *dev, int flags,
					 const char *name, int num_values)
{
	struct drm_property *property = NULL;
	int ret;

	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
	if (!property)
		return NULL;

	if (num_values) {
		property->values = kzalloc(sizeof(uint64_t)*num_values, GFP_KERNEL);
		if (!property->values)
			goto fail;
	}

	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
	if (ret)
		goto fail;

	property->flags = flags;
	property->num_values = num_values;
	INIT_LIST_HEAD(&property->enum_blob_list);

	if (name) {
		strncpy(property->name, name, DRM_PROP_NAME_LEN);
		property->name[DRM_PROP_NAME_LEN-1] = '\0';
	}

	list_add_tail(&property->head, &dev->mode_config.property_list);
	return property;
fail:
	kfree(property->values);
	kfree(property);
	return NULL;
}
EXPORT_SYMBOL(drm_property_create);

/* Convenience wrapper: create an enum property pre-populated from @props. */
struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
					 const char *name,
					 const struct drm_prop_enum_list *props,
					 int num_values)
{
	struct drm_property *property;
	int i, ret;

	flags |= DRM_MODE_PROP_ENUM;

	property = drm_property_create(dev, flags, name, num_values);
	if (!property)
		return NULL;

	for (i = 0; i < num_values; i++) {
		ret = drm_property_add_enum(property, i,
				      props[i].type,
				      props[i].name);
		if (ret) {
			drm_property_destroy(dev, property);
			return NULL;
		}
	}

	return property;
}
EXPORT_SYMBOL(drm_property_create_enum);

/* Convenience wrapper: create a bitmask property pre-populated from @props. */
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
					 int flags, const char *name,
					 const struct drm_prop_enum_list *props,
					 int num_values)
{
	struct drm_property *property;
	int i, ret;

	flags |= DRM_MODE_PROP_BITMASK;

	property = drm_property_create(dev, flags, name, num_values);
	if (!property)
		return NULL;

	for (i = 0; i < num_values; i++) {
		ret = drm_property_add_enum(property, i,
				      props[i].type,
				      props[i].name);
		if (ret) {
			drm_property_destroy(dev, property);
			return NULL;
		}
	}

	return property;
}
EXPORT_SYMBOL(drm_property_create_bitmask);

/* Convenience wrapper: create a range property with [min, max] bounds. */
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
					 const char *name,
					 uint64_t min, uint64_t max)
{
	struct drm_property *property;

	flags |= DRM_MODE_PROP_RANGE;

	property = drm_property_create(dev, flags, name, 2);
	if (!property)
		return NULL;

	property->values[0] = min;
	property->values[1] = max;

	return property;
}
EXPORT_SYMBOL(drm_property_create_range);

/*
 * Add (or rename, if @value already exists) an enum entry on an
 * enum/bitmask property.  Returns 0 on success or a negative errno.
 */
int drm_property_add_enum(struct drm_property *property, int index,
			  uint64_t value, const char *name)
{
	struct drm_property_enum *prop_enum;

	if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
		return -EINVAL;

	/*
	 * Bitmask enum properties have the additional constraint of values
	 * from 0 to 63
	 */
	if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
		return -EINVAL;

	if (!list_empty(&property->enum_blob_list)) {
		list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
			if (prop_enum->value == value) {
				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
				return 0;
			}
		}
	}

	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
	if (!prop_enum)
		return -ENOMEM;

	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
	prop_enum->value = value;

	property->values[index] = value;
	list_add_tail(&prop_enum->head, &property->enum_blob_list);
	return 0;
}
EXPORT_SYMBOL(drm_property_add_enum);

/* Free a property, its enum entries and its mode-object registration. */
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
	struct drm_property_enum *prop_enum, *pt;

	list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
		list_del(&prop_enum->head);
		kfree(prop_enum);
	}

	if (property->num_values)
		kfree(property->values);
	drm_mode_object_put(dev, &property->base);
	list_del(&property->head);
	kfree(property);
}
EXPORT_SYMBOL(drm_property_destroy);

/* Attach @property to @obj with initial value @init_val. */
void drm_object_attach_property(struct drm_mode_object *obj,
				struct drm_property *property,
				uint64_t init_val)
{
	int count = obj->properties->count;

	if (count == DRM_OBJECT_MAX_PROPERTY) {
		WARN(1, "Failed to attach object property (type: 0x%x). Please "
			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
			"you see this message on the same object type.\n",
			obj->type);
		return;
	}

	obj->properties->ids[count] = property->base.id;
	obj->properties->values[count] = init_val;
	obj->properties->count++;
}
EXPORT_SYMBOL(drm_object_attach_property);

/* Set the stored value of @property on @obj; -EINVAL if not attached. */
int drm_object_property_set_value(struct drm_mode_object *obj,
				  struct drm_property *property, uint64_t val)
{
	int i;

	for (i = 0; i < obj->properties->count; i++) {
		if (obj->properties->ids[i] == property->base.id) {
			obj->properties->values[i] = val;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(drm_object_property_set_value);

/* Read the stored value of @property on @obj; -EINVAL if not attached. */
int drm_object_property_get_value(struct drm_mode_object *obj,
				  struct drm_property *property, uint64_t *val)
{
	int i;

	for (i = 0; i < obj->properties->count; i++) {
		if (obj->properties->ids[i] == property->base.id) {
			*val = obj->properties->values[i];
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(drm_object_property_get_value);

/*
 * Report a property's metadata (name, flags, values, enum/blob lists) to
 * userspace, using the usual two-pass size-then-fill ioctl convention.
 */
int drm_mode_getproperty_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file_priv)
{
	struct drm_mode_object *obj;
	struct drm_mode_get_property *out_resp = data;
	struct drm_property *property;
	int enum_count = 0;
	int blob_count = 0;
	int value_count = 0;
	int ret = 0, i;
	int copied;
	struct drm_property_enum *prop_enum;
	struct drm_mode_property_enum __user *enum_ptr;
	struct drm_property_blob *prop_blob;
	uint32_t __user *blob_id_ptr;
	uint64_t __user *values_ptr;
	uint32_t __user *blob_length_ptr;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
	if (!obj) {
		ret = -EINVAL;
		goto done;
	}
	property = obj_to_property(obj);

	/* enum_blob_list holds enums OR blobs depending on the flags. */
	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
		list_for_each_entry(prop_enum, &property->enum_blob_list, head)
			enum_count++;
	} else if (property->flags & DRM_MODE_PROP_BLOB) {
		list_for_each_entry(prop_blob, &property->enum_blob_list, head)
			blob_count++;
	}

	value_count = property->num_values;

	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
	out_resp->flags = property->flags;

	if ((out_resp->count_values >= value_count) && value_count) {
		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
		for (i = 0; i < value_count; i++) {
			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
				ret = -EFAULT;
				goto done;
			}
		}
	}
	out_resp->count_values = value_count;

	if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
			copied = 0;
			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {

				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
					ret = -EFAULT;
					goto done;
				}

				if (copy_to_user(&enum_ptr[copied].name,
						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
					ret = -EFAULT;
					goto done;
				}
				copied++;
			}
		}
		out_resp->count_enum_blobs = enum_count;
	}

	if (property->flags & DRM_MODE_PROP_BLOB) {
		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
			copied = 0;
			blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
			blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;

			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
					ret = -EFAULT;
					goto done;
				}

				if (put_user(prop_blob->length, blob_length_ptr + copied)) {
					ret = -EFAULT;
					goto done;
				}

				copied++;
			}
		}
		out_resp->count_enum_blobs = blob_count;
	}
done:
	drm_modeset_unlock_all(dev);
	return ret;
}

/* Allocate a blob object holding a copy of @length bytes of @data. */
static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
							  void *data)
{
	struct drm_property_blob *blob;
	int ret;

	if (!length || !data)
		return NULL;

	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
	if (!blob)
		return NULL;

	ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
	if (ret) {
		kfree(blob);
		return NULL;
	}

	blob->length = length;

	memcpy(blob->data, data, length);

	list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
	return blob;
}

/* Unregister and free a blob created by drm_property_create_blob(). */
static void drm_property_destroy_blob(struct drm_device *dev,
			       struct drm_property_blob *blob)
{
	drm_mode_object_put(dev, &blob->base);
	list_del(&blob->head);
	kfree(blob);
}

/* Copy a blob's contents to userspace if the supplied buffer is large enough. */
int drm_mode_getblob_ioctl(struct drm_device *dev,
			   void *data, struct drm_file *file_priv)
{
	struct drm_mode_object *obj;
	struct drm_mode_get_blob *out_resp = data;
	struct drm_property_blob *blob;
	int ret = 0;
	void __user *blob_ptr;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
	if (!obj) {
		ret = -EINVAL;
		goto done;
	}
	blob = obj_to_blob(obj);

	if (out_resp->length == blob->length) {
		blob_ptr = (void __user *)(unsigned long)out_resp->data;
		if (copy_to_user(blob_ptr, blob->data, blob->length)){
			ret = -EFAULT;
			goto done;
		}
	}
	out_resp->length = blob->length;

done:
	drm_modeset_unlock_all(dev);
	return ret;
}

/*
 * Replace the connector's EDID blob property with @edid (or clear it when
 * @edid is NULL).  Returns 0 on success or a negative errno.
 */
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
					    struct edid *edid)
{
	struct drm_device *dev = connector->dev;
	int ret, size;

	if (connector->edid_blob_ptr)
		drm_property_destroy_blob(dev, connector->edid_blob_ptr);

	/* Delete edid, when there is none. */
	if (!edid) {
		connector->edid_blob_ptr = NULL;
		ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
		return ret;
	}

	size = EDID_LENGTH * (1 + edid->extensions);
	connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
							    size, edid);
	if (!connector->edid_blob_ptr)
		return -EINVAL;

	ret = drm_object_property_set_value(&connector->base,
					       dev->mode_config.edid_property,
					       connector->edid_blob_ptr->base.id);

	return ret;
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);

/* Validate a requested property value against the property's type/flags. */
static bool drm_property_change_is_valid(struct drm_property *property,
					 uint64_t value)
{
	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
		return false;
	if (property->flags & DRM_MODE_PROP_RANGE) {
		if (value < property->values[0] || value > property->values[1])
			return false;
		return true;
	} else if (property->flags & DRM_MODE_PROP_BITMASK) {
		int i;
		uint64_t valid_mask = 0;
		for (i = 0; i < property->num_values; i++)
			valid_mask |= (1ULL << property->values[i]);
		return !(value & ~valid_mask);
	} else if (property->flags & DRM_MODE_PROP_BLOB) {
		/* Only the driver knows */
		return true;
	} else {
		/* Plain enum: value must match one of the listed values. */
		int i;
		for (i = 0; i < property->num_values; i++)
			if (property->values[i] == value)
				return true;
		return false;
	}
}

/* Legacy connector-property ioctl, forwarded to the generic object path. */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file_priv)
{
	struct drm_mode_connector_set_property *conn_set_prop = data;
	struct drm_mode_obj_set_property obj_set_prop = {
		.value = conn_set_prop->value,
		.prop_id = conn_set_prop->prop_id,
		.obj_id = conn_set_prop->connector_id,
		.obj_type = DRM_MODE_OBJECT_CONNECTOR
	};

	/* It does all the locking and checking we need */
	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
}

/* Apply a property change to a connector (DPMS handled in the core). */
static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
					   struct drm_property *property,
					   uint64_t value)
{
	int ret = -EINVAL;
	struct drm_connector *connector = obj_to_connector(obj);

	/* Do DPMS ourselves */
	if (property == connector->dev->mode_config.dpms_property) {
		if (connector->funcs->dpms)
			(*connector->funcs->dpms)(connector, (int)value);
		ret = 0;
	} else if (connector->funcs->set_property)
		ret = connector->funcs->set_property(connector, property, value);

	/* store the property value if successful */
	if (!ret)
		drm_object_property_set_value(&connector->base, property, value);
	return ret;
}

/* Apply a property change to a CRTC via its driver callback. */
static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
				      struct drm_property *property,
				      uint64_t value)
{
	int ret = -EINVAL;
	struct drm_crtc *crtc = obj_to_crtc(obj);

	if (crtc->funcs->set_property)
		ret = crtc->funcs->set_property(crtc, property, value);
	if (!ret)
		drm_object_property_set_value(obj, property, value);

	return ret;
}

/* Apply a property change to a plane via its driver callback. */
static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
				      struct drm_property *property,
				      uint64_t value)
{
	int ret = -EINVAL;
	struct drm_plane *plane = obj_to_plane(obj);

	if (plane->funcs->set_property)
		ret = plane->funcs->set_property(plane, property, value);
	if (!ret)
		drm_object_property_set_value(obj, property, value);

	return ret;
}

/* Report the property ids/values attached to an arbitrary mode object. */
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct drm_mode_obj_get_properties *arg = data;
	struct drm_mode_object *obj;
	int ret = 0;
	int i;
	int copied = 0;
	int props_count = 0;
	uint32_t __user *props_ptr;
	uint64_t __user *prop_values_ptr;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);

	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}
	if (!obj->properties) {
		ret = -EINVAL;
		goto out;
	}

	props_count = obj->properties->count;

	/* This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it. */
	if ((arg->count_props >= props_count) && props_count) {
		copied = 0;
		props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
		prop_values_ptr = (uint64_t __user *)(unsigned long)
				  (arg->prop_values_ptr);
		for (i = 0; i < props_count; i++) {
			if (put_user(obj->properties->ids[i],
				     props_ptr + copied)) {
				ret = -EFAULT;
				goto out;
			}
			if (put_user(obj->properties->values[i],
				     prop_values_ptr + copied)) {
				ret = -EFAULT;
				goto out;
			}
			copied++;
		}
	}
	arg->count_props = props_count;
out:
	drm_modeset_unlock_all(dev);
	return ret;
}

/*
 * Set a property on an arbitrary mode object.  Validates that the property
 * is attached to the object and that the value is legal for the property
 * before dispatching to the per-object-type setter.
 */
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_mode_obj_set_property *arg = data;
	struct drm_mode_object *arg_obj;
	struct drm_mode_object *prop_obj;
	struct drm_property *property;
	int ret = -EINVAL;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);

	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
	if (!arg_obj)
		goto out;
	if (!arg_obj->properties)
		goto out;

	for (i = 0; i < arg_obj->properties->count; i++)
		if (arg_obj->properties->ids[i] == arg->prop_id)
			break;

	if (i == arg_obj->properties->count)
		goto out;

	prop_obj = drm_mode_object_find(dev, arg->prop_id,
					DRM_MODE_OBJECT_PROPERTY);
	if (!prop_obj)
		goto out;
	property = obj_to_property(prop_obj);

	if (!drm_property_change_is_valid(property, arg->value))
		goto out;

	switch (arg_obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR:
		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
						      arg->value);
		break;
	case DRM_MODE_OBJECT_CRTC:
		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
		break;
	case DRM_MODE_OBJECT_PLANE:
		ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
		break;
	}

out:
	drm_modeset_unlock_all(dev);
	return ret;
}

int drm_mode_connector_attach_encoder(struct drm_connector *connector,
				      struct drm_encoder *encoder)
{
	int i;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0) {
			connector->encoder_ids[i] =
encoder->base.id; return 0; } } return -ENOMEM; } EXPORT_SYMBOL(drm_mode_connector_attach_encoder); void drm_mode_connector_detach_encoder(struct drm_connector *connector, struct drm_encoder *encoder) { int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] == encoder->base.id) { connector->encoder_ids[i] = 0; if (connector->encoder == encoder) connector->encoder = NULL; break; } } } EXPORT_SYMBOL(drm_mode_connector_detach_encoder); int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size) { crtc->gamma_size = gamma_size; crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL); if (!crtc->gamma_store) { crtc->gamma_size = 0; return -ENOMEM; } return 0; } EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); int drm_mode_gamma_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_lut *crtc_lut = data; struct drm_mode_object *obj; struct drm_crtc *crtc; void *r_base, *g_base, *b_base; int size; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; drm_modeset_lock_all(dev); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); if (crtc->funcs->gamma_set == NULL) { ret = -ENOSYS; goto out; } /* memcpy into gamma store */ if (crtc_lut->gamma_size != crtc->gamma_size) { ret = -EINVAL; goto out; } size = crtc_lut->gamma_size * (sizeof(uint16_t)); r_base = crtc->gamma_store; if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) { ret = -EFAULT; goto out; } g_base = r_base + size; if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) { ret = -EFAULT; goto out; } b_base = g_base + size; if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) { ret = -EFAULT; goto out; } crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); out: drm_modeset_unlock_all(dev); return ret; } int 
drm_mode_gamma_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_lut *crtc_lut = data; struct drm_mode_object *obj; struct drm_crtc *crtc; void *r_base, *g_base, *b_base; int size; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; drm_modeset_lock_all(dev); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { ret = -EINVAL; goto out; } crtc = obj_to_crtc(obj); /* memcpy into gamma store */ if (crtc_lut->gamma_size != crtc->gamma_size) { ret = -EINVAL; goto out; } size = crtc_lut->gamma_size * (sizeof(uint16_t)); r_base = crtc->gamma_store; if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) { ret = -EFAULT; goto out; } g_base = r_base + size; if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) { ret = -EFAULT; goto out; } b_base = g_base + size; if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) { ret = -EFAULT; goto out; } out: drm_modeset_unlock_all(dev); return ret; } int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc_page_flip *page_flip = data; struct drm_mode_object *obj; struct drm_crtc *crtc; struct drm_framebuffer *fb = NULL, *old_fb = NULL; struct drm_pending_vblank_event *e = NULL; unsigned long flags; int hdisplay, vdisplay; int ret = -EINVAL; if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || page_flip->reserved != 0) return -EINVAL; obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) return -EINVAL; crtc = obj_to_crtc(obj); mutex_lock(&crtc->mutex); if (crtc->fb == NULL) { /* The framebuffer is currently unbound, presumably * due to a hotplug event, that userspace has not * yet discovered. 
*/ ret = -EBUSY; goto out; } if (crtc->funcs->page_flip == NULL) goto out; fb = drm_framebuffer_lookup(dev, page_flip->fb_id); if (!fb) goto out; hdisplay = crtc->mode.hdisplay; vdisplay = crtc->mode.vdisplay; if (crtc->invert_dimensions) swap(hdisplay, vdisplay); if (hdisplay > fb->width || vdisplay > fb->height || crtc->x > fb->width - hdisplay || crtc->y > fb->height - vdisplay) { DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n", fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y, crtc->invert_dimensions ? " (inverted)" : ""); ret = -ENOSPC; goto out; } if (crtc->fb->pixel_format != fb->pixel_format) { DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n"); ret = -EINVAL; goto out; } if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { ret = -ENOMEM; spin_lock_irqsave(&dev->event_lock, flags); if (file_priv->event_space < sizeof e->event) { spin_unlock_irqrestore(&dev->event_lock, flags); goto out; } file_priv->event_space -= sizeof e->event; spin_unlock_irqrestore(&dev->event_lock, flags); e = kzalloc(sizeof *e, GFP_KERNEL); if (e == NULL) { spin_lock_irqsave(&dev->event_lock, flags); file_priv->event_space += sizeof e->event; spin_unlock_irqrestore(&dev->event_lock, flags); goto out; } e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof e->event; e->event.user_data = page_flip->user_data; e->base.event = &e->event.base; e->base.file_priv = file_priv; e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; } old_fb = crtc->fb; ret = crtc->funcs->page_flip(crtc, fb, e); if (ret) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { spin_lock_irqsave(&dev->event_lock, flags); file_priv->event_space += sizeof e->event; spin_unlock_irqrestore(&dev->event_lock, flags); kfree(e); } /* Keep the old fb, don't unref it. */ old_fb = NULL; } else { /* * Warn if the driver hasn't properly updated the crtc->fb * field to reflect that the new framebuffer is now used. 
* Failing to do so will screw with the reference counting * on framebuffers. */ WARN_ON(crtc->fb != fb); /* Unref only the old framebuffer. */ fb = NULL; } out: if (fb) drm_framebuffer_unreference(fb); if (old_fb) drm_framebuffer_unreference(old_fb); mutex_unlock(&crtc->mutex); return ret; } void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) if (crtc->funcs->reset) crtc->funcs->reset(crtc); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) if (encoder->funcs->reset) encoder->funcs->reset(encoder); list_for_each_entry(connector, &dev->mode_config.connector_list, head) { connector->status = connector_status_unknown; if (connector->funcs->reset) connector->funcs->reset(connector); } } EXPORT_SYMBOL(drm_mode_config_reset); int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_create_dumb *args = data; if (!dev->driver->dumb_create) return -ENOSYS; return dev->driver->dumb_create(file_priv, dev, args); } int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; /* call driver ioctl to get mmap offset */ if (!dev->driver->dumb_map_offset) return -ENOSYS; return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); } int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_destroy_dumb *args = data; if (!dev->driver->dumb_destroy) return -ENOSYS; return dev->driver->dumb_destroy(file_priv, dev, args->handle); } /* * Just need to support RGB formats here for compat with code that doesn't * use pixel formats directly yet. 
*/ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp) { switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB332: case DRM_FORMAT_BGR233: *depth = 8; *bpp = 8; break; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_XBGR1555: case DRM_FORMAT_RGBX5551: case DRM_FORMAT_BGRX5551: case DRM_FORMAT_ARGB1555: case DRM_FORMAT_ABGR1555: case DRM_FORMAT_RGBA5551: case DRM_FORMAT_BGRA5551: *depth = 15; *bpp = 16; break; case DRM_FORMAT_RGB565: case DRM_FORMAT_BGR565: *depth = 16; *bpp = 16; break; case DRM_FORMAT_RGB888: case DRM_FORMAT_BGR888: *depth = 24; *bpp = 24; break; case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_RGBX8888: case DRM_FORMAT_BGRX8888: *depth = 24; *bpp = 32; break; case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_RGBA1010102: case DRM_FORMAT_BGRA1010102: *depth = 30; *bpp = 32; break; case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_RGBA8888: case DRM_FORMAT_BGRA8888: *depth = 32; *bpp = 32; break; default: DRM_DEBUG_KMS("unsupported pixel format\n"); *depth = 0; *bpp = 0; break; } } EXPORT_SYMBOL(drm_fb_get_bpp_depth); /** * drm_format_num_planes - get the number of planes for format * @format: pixel format (DRM_FORMAT_*) * * RETURNS: * The number of planes used by the specified pixel format. 
*/ int drm_format_num_planes(uint32_t format) { switch (format) { case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: case DRM_FORMAT_YUV411: case DRM_FORMAT_YVU411: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case DRM_FORMAT_YVU444: return 3; case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_NV24: case DRM_FORMAT_NV42: return 2; default: return 1; } } EXPORT_SYMBOL(drm_format_num_planes); /** * drm_format_plane_cpp - determine the bytes per pixel value * @format: pixel format (DRM_FORMAT_*) * @plane: plane index * * RETURNS: * The bytes per pixel value for the specified plane. */ int drm_format_plane_cpp(uint32_t format, int plane) { unsigned int depth; int bpp; if (plane >= drm_format_num_planes(format)) return 0; switch (format) { case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: return 2; case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_NV24: case DRM_FORMAT_NV42: return plane ? 2 : 1; case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: case DRM_FORMAT_YUV411: case DRM_FORMAT_YVU411: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case DRM_FORMAT_YVU444: return 1; default: drm_fb_get_bpp_depth(format, &depth, &bpp); return bpp >> 3; } } EXPORT_SYMBOL(drm_format_plane_cpp); /** * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor * @format: pixel format (DRM_FORMAT_*) * * RETURNS: * The horizontal chroma subsampling factor for the * specified pixel format. 
*/ int drm_format_horz_chroma_subsampling(uint32_t format) { switch (format) { case DRM_FORMAT_YUV411: case DRM_FORMAT_YVU411: case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: return 4; case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: return 2; default: return 1; } } EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); /** * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor * @format: pixel format (DRM_FORMAT_*) * * RETURNS: * The vertical chroma subsampling factor for the * specified pixel format. */ int drm_format_vert_chroma_subsampling(uint32_t format) { switch (format) { case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: return 4; case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: return 2; default: return 1; } } EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); /** * drm_mode_config_init - initialize DRM mode_configuration structure * @dev: DRM device * * Initialize @dev's mode_config structure, used for tracking the graphics * configuration of @dev. * * Since this initializes the modeset locks, no locking is possible. Which is no * problem, since this should happen single threaded at init time. It is the * driver's problem to ensure this guarantee. 
 *
 */
void drm_mode_config_init(struct drm_device *dev)
{
	/* Locks protecting mode_config state, the object idr and the
	 * framebuffer list respectively. */
	mutex_init(&dev->mode_config.mutex);
	mutex_init(&dev->mode_config.idr_mutex);
	mutex_init(&dev->mode_config.fb_lock);
	/* Empty lists for every kind of mode object this device can own. */
	INIT_LIST_HEAD(&dev->mode_config.fb_list);
	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
	INIT_LIST_HEAD(&dev->mode_config.connector_list);
	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
	INIT_LIST_HEAD(&dev->mode_config.property_list);
	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
	INIT_LIST_HEAD(&dev->mode_config.plane_list);
	/* idr handing out IDs for mode objects; despite the name it is also
	 * used for non-CRTC objects (see drm_mode_object_find callers). */
	idr_init(&dev->mode_config.crtc_idr);

	/* Create the standard connector properties under the modeset locks,
	 * matching the locking the property code expects. */
	drm_modeset_lock_all(dev);
	drm_mode_create_standard_connector_properties(dev);
	drm_modeset_unlock_all(dev);

	/* Just to be sure */
	dev->mode_config.num_fb = 0;
	dev->mode_config.num_connector = 0;
	dev->mode_config.num_crtc = 0;
	dev->mode_config.num_encoder = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);

/**
 * drm_mode_config_cleanup - free up DRM mode_config info
 * @dev: DRM device
 *
 * Free up all the connectors and CRTCs associated with this DRM device, then
 * free up the framebuffers and associated buffer objects.
 *
 * Note that since this /should/ happen single-threaded at driver/device
 * teardown time, no locking is required. It's the driver's job to ensure that
 * this guarantee actually holds true.
 *
 * FIXME: cleanup any dangling user buffer objects too
 */
void drm_mode_config_cleanup(struct drm_device *dev)
{
	struct drm_connector *connector, *ot;
	struct drm_crtc *crtc, *ct;
	struct drm_encoder *encoder, *enct;
	struct drm_framebuffer *fb, *fbt;
	struct drm_property *property, *pt;
	struct drm_property_blob *blob, *bt;
	struct drm_plane *plane, *plt;

	/* NOTE(review): encoders and connectors are destroyed before the
	 * property/blob lists below — presumably their destroy hooks may
	 * still touch properties; confirm before reordering. */
	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
				 head) {
		encoder->funcs->destroy(encoder);
	}

	list_for_each_entry_safe(connector, ot,
				 &dev->mode_config.connector_list, head) {
		connector->funcs->destroy(connector);
	}

	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
				 head) {
		drm_property_destroy(dev, property);
	}

	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
				 head) {
		drm_property_destroy_blob(dev, blob);
	}

	/*
	 * Single-threaded teardown context, so it's not required to grab the
	 * fb_lock to protect against concurrent fb_list access. Contrary, it
	 * would actually deadlock with the drm_framebuffer_cleanup function.
	 *
	 * Also, if there are any framebuffers left, that's a driver leak now,
	 * so politely WARN about this.
	 */
	WARN_ON(!list_empty(&dev->mode_config.fb_list));
	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
		drm_framebuffer_remove(fb);
	}

	/* Planes before CRTCs, mirroring the order they reference each
	 * other in the mode config. */
	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
				 head) {
		plane->funcs->destroy(plane);
	}

	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
		crtc->funcs->destroy(crtc);
	}

	/* Finally release the mode-object id allocator itself. */
	idr_destroy(&dev->mode_config.crtc_idr);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
gpl-2.0
simo97/linux
drivers/staging/rtl8712/rtl8712_recv.c
221
35246
/****************************************************************************** * rtl8712_recv.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL8712_RECV_C_ #include <linux/if_ether.h> #include <linux/ip.h> #include "osdep_service.h" #include "drv_types.h" #include "recv_osdep.h" #include "mlme_osdep.h" #include "ethernet.h" #include "usb_ops.h" #include "wifi.h" /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static u8 bridge_tunnel_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8}; /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static u8 rfc1042_header[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; static void recv_tasklet(void *priv); int r8712_init_recv_priv(struct recv_priv *precvpriv, struct _adapter *padapter) { int i; struct recv_buf *precvbuf; int res = _SUCCESS; addr_t tmpaddr = 0; int alignment = 0; struct sk_buff *pskb = NULL; /*init recv_buf*/ _init_queue(&precvpriv->free_recv_buf_queue); precvpriv->pallocated_recv_buf = 
kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4, GFP_ATOMIC); if (precvpriv->pallocated_recv_buf == NULL) return _FAIL; precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 - ((addr_t) (precvpriv->pallocated_recv_buf) & 3); precvbuf = (struct recv_buf *)precvpriv->precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { INIT_LIST_HEAD(&precvbuf->list); spin_lock_init(&precvbuf->recvbuf_lock); res = r8712_os_recvbuf_resource_alloc(padapter, precvbuf); if (res == _FAIL) break; precvbuf->ref_cnt = 0; precvbuf->adapter = padapter; list_add_tail(&precvbuf->list, &(precvpriv->free_recv_buf_queue.queue)); precvbuf++; } precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF; tasklet_init(&precvpriv->recv_tasklet, (void(*)(unsigned long))recv_tasklet, (unsigned long)padapter); skb_queue_head_init(&precvpriv->rx_skb_queue); skb_queue_head_init(&precvpriv->free_recv_skb_queue); for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) { pskb = netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ); if (pskb) { tmpaddr = (addr_t)pskb->data; alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1); skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment)); skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb); } pskb = NULL; } return res; } void r8712_free_recv_priv(struct recv_priv *precvpriv) { int i; struct recv_buf *precvbuf; struct _adapter *padapter = precvpriv->adapter; precvbuf = (struct recv_buf *)precvpriv->precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { r8712_os_recvbuf_resource_free(padapter, precvbuf); precvbuf++; } kfree(precvpriv->pallocated_recv_buf); skb_queue_purge(&precvpriv->rx_skb_queue); if (skb_queue_len(&precvpriv->rx_skb_queue)) netdev_warn(padapter->pnetdev, "r8712u: rx_skb_queue not empty\n"); skb_queue_purge(&precvpriv->free_recv_skb_queue); if (skb_queue_len(&precvpriv->free_recv_skb_queue)) netdev_warn(padapter->pnetdev, "r8712u: free_recv_skb_queue not empty %d\n", skb_queue_len(&precvpriv->free_recv_skb_queue)); } int r8712_init_recvbuf(struct _adapter *padapter, struct 
recv_buf *precvbuf) { precvbuf->transfer_len = 0; precvbuf->len = 0; precvbuf->ref_cnt = 0; if (precvbuf->pbuf) { precvbuf->pdata = precvbuf->pbuf; precvbuf->phead = precvbuf->pbuf; precvbuf->ptail = precvbuf->pbuf; precvbuf->pend = precvbuf->pdata + MAX_RECVBUF_SZ; } return _SUCCESS; } int r8712_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue) { unsigned long irqL; struct _adapter *padapter = precvframe->u.hdr.adapter; struct recv_priv *precvpriv = &padapter->recvpriv; if (precvframe->u.hdr.pkt) { dev_kfree_skb_any(precvframe->u.hdr.pkt);/*free skb by driver*/ precvframe->u.hdr.pkt = NULL; } spin_lock_irqsave(&pfree_recv_queue->lock, irqL); list_del_init(&(precvframe->u.hdr.list)); list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue); if (padapter != NULL) { if (pfree_recv_queue == &precvpriv->free_recv_queue) precvpriv->free_recvframe_cnt++; } spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL); return _SUCCESS; } static void update_recvframe_attrib_from_recvstat(struct rx_pkt_attrib *pattrib, struct recv_stat *prxstat) { u16 drvinfo_sz = 0; drvinfo_sz = (le32_to_cpu(prxstat->rxdw0)&0x000f0000)>>16; drvinfo_sz <<= 3; /*TODO: * Offset 0 */ pattrib->bdecrypted = ((le32_to_cpu(prxstat->rxdw0) & BIT(27)) >> 27) ? 
0 : 1; pattrib->crc_err = (le32_to_cpu(prxstat->rxdw0) & BIT(14)) >> 14; /*Offset 4*/ /*Offset 8*/ /*Offset 12*/ if (le32_to_cpu(prxstat->rxdw3) & BIT(13)) { pattrib->tcpchk_valid = 1; /* valid */ if (le32_to_cpu(prxstat->rxdw3) & BIT(11)) pattrib->tcp_chkrpt = 1; /* correct */ else pattrib->tcp_chkrpt = 0; /* incorrect */ if (le32_to_cpu(prxstat->rxdw3) & BIT(12)) pattrib->ip_chkrpt = 1; /* correct */ else pattrib->ip_chkrpt = 0; /* incorrect */ } else pattrib->tcpchk_valid = 0; /* invalid */ pattrib->mcs_rate = (u8)((le32_to_cpu(prxstat->rxdw3)) & 0x3f); pattrib->htc = (u8)((le32_to_cpu(prxstat->rxdw3) >> 14) & 0x1); /*Offset 16*/ /*Offset 20*/ /*phy_info*/ } /*perform defrag*/ static union recv_frame *recvframe_defrag(struct _adapter *adapter, struct __queue *defrag_q) { struct list_head *plist, *phead; u8 wlanhdr_offset; u8 curfragnum; struct recv_frame_hdr *pfhdr, *pnfhdr; union recv_frame *prframe, *pnextrframe; struct __queue *pfree_recv_queue; pfree_recv_queue = &adapter->recvpriv.free_recv_queue; phead = &defrag_q->queue; plist = phead->next; prframe = LIST_CONTAINOR(plist, union recv_frame, u); list_del_init(&prframe->u.list); pfhdr = &prframe->u.hdr; curfragnum = 0; if (curfragnum != pfhdr->attrib.frag_num) { /*the first fragment number must be 0 *free the whole queue*/ r8712_free_recvframe(prframe, pfree_recv_queue); r8712_free_recvframe_queue(defrag_q, pfree_recv_queue); return NULL; } curfragnum++; plist = &defrag_q->queue; plist = plist->next; while (end_of_queue_search(phead, plist) == false) { pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u); pnfhdr = &pnextrframe->u.hdr; /*check the fragment sequence (2nd ~n fragment frame) */ if (curfragnum != pnfhdr->attrib.frag_num) { /* the fragment number must increase (after decache) * release the defrag_q & prframe */ r8712_free_recvframe(prframe, pfree_recv_queue); r8712_free_recvframe_queue(defrag_q, pfree_recv_queue); return NULL; } curfragnum++; /* copy the 2nd~n fragment frame's payload to the 
first fragment * get the 2nd~last fragment frame's payload */ wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len; recvframe_pull(pnextrframe, wlanhdr_offset); /* append to first fragment frame's tail (if privacy frame, * pull the ICV) */ recvframe_pull_tail(prframe, pfhdr->attrib.icv_len); memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len); recvframe_put(prframe, pnfhdr->len); pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len; plist = plist->next; } /* free the defrag_q queue and return the prframe */ r8712_free_recvframe_queue(defrag_q, pfree_recv_queue); return prframe; } /* check if need to defrag, if needed queue the frame to defrag_q */ union recv_frame *r8712_recvframe_chk_defrag(struct _adapter *padapter, union recv_frame *precv_frame) { u8 ismfrag; u8 fragnum; u8 *psta_addr; struct recv_frame_hdr *pfhdr; struct sta_info *psta; struct sta_priv *pstapriv; struct list_head *phead; union recv_frame *prtnframe = NULL; struct __queue *pfree_recv_queue, *pdefrag_q; pstapriv = &padapter->stapriv; pfhdr = &precv_frame->u.hdr; pfree_recv_queue = &padapter->recvpriv.free_recv_queue; /* need to define struct of wlan header frame ctrl */ ismfrag = pfhdr->attrib.mfrag; fragnum = pfhdr->attrib.frag_num; psta_addr = pfhdr->attrib.ta; psta = r8712_get_stainfo(pstapriv, psta_addr); if (psta == NULL) pdefrag_q = NULL; else pdefrag_q = &psta->sta_recvpriv.defrag_q; if ((ismfrag == 0) && (fragnum == 0)) prtnframe = precv_frame;/*isn't a fragment frame*/ if (ismfrag == 1) { /* 0~(n-1) fragment frame * enqueue to defraf_g */ if (pdefrag_q != NULL) { if (fragnum == 0) { /*the first fragment*/ if (!list_empty(&pdefrag_q->queue)) { /*free current defrag_q */ r8712_free_recvframe_queue(pdefrag_q, pfree_recv_queue); } } /* Then enqueue the 0~(n-1) fragment to the defrag_q */ phead = &pdefrag_q->queue; list_add_tail(&pfhdr->list, phead); prtnframe = NULL; } else { /* can't find this ta's defrag_queue, so free this * recv_frame */ r8712_free_recvframe(precv_frame, 
pfree_recv_queue); prtnframe = NULL; } } if ((ismfrag == 0) && (fragnum != 0)) { /* the last fragment frame * enqueue the last fragment */ if (pdefrag_q != NULL) { phead = &pdefrag_q->queue; list_add_tail(&pfhdr->list, phead); /*call recvframe_defrag to defrag*/ precv_frame = recvframe_defrag(padapter, pdefrag_q); prtnframe = precv_frame; } else { /* can't find this ta's defrag_queue, so free this * recv_frame */ r8712_free_recvframe(precv_frame, pfree_recv_queue); prtnframe = NULL; } } if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) { /* after defrag we must check tkip mic code */ if (r8712_recvframe_chkmic(padapter, prtnframe) == _FAIL) { r8712_free_recvframe(prtnframe, pfree_recv_queue); prtnframe = NULL; } } return prtnframe; } static int amsdu_to_msdu(struct _adapter *padapter, union recv_frame *prframe) { int a_len, padding_len; u16 eth_type, nSubframe_Length; u8 nr_subframes, i; unsigned char *data_ptr, *pdata; struct rx_pkt_attrib *pattrib; _pkt *sub_skb, *subframes[MAX_SUBFRAME_COUNT]; struct recv_priv *precvpriv = &padapter->recvpriv; struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue); nr_subframes = 0; pattrib = &prframe->u.hdr.attrib; recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen); if (prframe->u.hdr.attrib.iv_len > 0) recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len); a_len = prframe->u.hdr.len; pdata = prframe->u.hdr.rx_data; while (a_len > ETH_HLEN) { /* Offset 12 denote 2 mac address */ nSubframe_Length = *((u16 *)(pdata + 12)); /*==m==>change the length order*/ nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8); if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) { netdev_warn(padapter->pnetdev, "r8712u: nRemain_Length is %d and nSubframe_Length is: %d\n", a_len, nSubframe_Length); goto exit; } /* move the data point to data content */ pdata += ETH_HLEN; a_len -= ETH_HLEN; /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); if (!sub_skb) 
break; skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length); memcpy(data_ptr, pdata, nSubframe_Length); subframes[nr_subframes++] = sub_skb; if (nr_subframes >= MAX_SUBFRAME_COUNT) { netdev_warn(padapter->pnetdev, "r8712u: ParseSubframe(): Too many Subframes! Packets dropped!\n"); break; } pdata += nSubframe_Length; a_len -= nSubframe_Length; if (a_len != 0) { padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & 3); if (padding_len == 4) padding_len = 0; if (a_len < padding_len) goto exit; pdata += padding_len; a_len -= padding_len; } } for (i = 0; i < nr_subframes; i++) { sub_skb = subframes[i]; /* convert hdr + possible LLC headers into Ethernet header */ eth_type = (sub_skb->data[6] << 8) | sub_skb->data[7]; if (sub_skb->len >= 8 && ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) && eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) || !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(sub_skb, SNAP_SIZE); memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ len = htons(sub_skb->len); memcpy(skb_push(sub_skb, 2), &len, 2); memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN); } /* Indicate the packets to upper layer */ if (sub_skb) { sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev); sub_skb->dev = padapter->pnetdev; if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1)) { sub_skb->ip_summed = CHECKSUM_UNNECESSARY; } else sub_skb->ip_summed = CHECKSUM_NONE; netif_rx(sub_skb); } } exit: prframe->u.hdr.len = 0; r8712_free_recvframe(prframe, pfree_recv_queue); return _SUCCESS; } void r8712_rxcmd_event_hdl(struct _adapter *padapter, void *prxcmdbuf) { uint voffset; u8 *poffset; u16 cmd_len, drvinfo_sz; struct recv_stat 
*prxstat; poffset = (u8 *)prxcmdbuf; voffset = *(uint *)poffset; prxstat = (struct recv_stat *)prxcmdbuf; drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16; drvinfo_sz <<= 3; poffset += RXDESC_SIZE + drvinfo_sz; do { voffset = *(uint *)poffset; cmd_len = (u16)(le32_to_cpu(voffset) & 0xffff); r8712_event_handle(padapter, (uint *)poffset); poffset += (cmd_len + 8);/*8 bytes alignment*/ } while (le32_to_cpu(voffset) & BIT(31)); } static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num) { u8 wsize = preorder_ctrl->wsize_b; u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) % 4096; /* Rx Reorder initialize condition.*/ if (preorder_ctrl->indicate_seq == 0xffff) preorder_ctrl->indicate_seq = seq_num; /* Drop out the packet which SeqNum is smaller than WinStart */ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq)) return false; /* * Sliding window manipulation. Conditions includes: * 1. Incoming SeqNum is equal to WinStart =>Window shift 1 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N */ if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096; else if (SN_LESS(wend, seq_num)) { if (seq_num >= (wsize - 1)) preorder_ctrl->indicate_seq = seq_num + 1 - wsize; else preorder_ctrl->indicate_seq = 4095 - (wsize - (seq_num + 1)) + 1; } return true; } static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe) { struct list_head *phead, *plist; union recv_frame *pnextrframe; struct rx_pkt_attrib *pnextattrib; struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; phead = &ppending_recvframe_queue->queue; plist = phead->next; while (end_of_queue_search(phead, plist) == false) { pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u); pnextattrib = &pnextrframe->u.hdr.attrib; if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num)) 
plist = plist->next; else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) return false; else break; } list_del_init(&(prframe->u.hdr.list)); list_add_tail(&(prframe->u.hdr.list), plist); return true; } int r8712_recv_indicatepkts_in_order(struct _adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced) { struct list_head *phead, *plist; union recv_frame *prframe; struct rx_pkt_attrib *pattrib; int bPktInBuf = false; struct recv_priv *precvpriv = &padapter->recvpriv; struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; phead = &ppending_recvframe_queue->queue; plist = phead->next; /* Handling some condition for forced indicate case.*/ if (bforced == true) { if (list_empty(phead)) return true; prframe = LIST_CONTAINOR(plist, union recv_frame, u); pattrib = &prframe->u.hdr.attrib; preorder_ctrl->indicate_seq = pattrib->seq_num; } /* Prepare indication list and indication. * Check if there is any packet need indicate. */ while (!list_empty(phead)) { prframe = LIST_CONTAINOR(plist, union recv_frame, u); pattrib = &prframe->u.hdr.attrib; if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) { plist = plist->next; list_del_init(&(prframe->u.hdr.list)); if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num)) preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) % 4096; /*indicate this recv_frame*/ if (!pattrib->amsdu) { if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) { /* indicate this recv_frame */ r8712_recv_indicatepkt(padapter, prframe); } } else if (pattrib->amsdu == 1) { if (amsdu_to_msdu(padapter, prframe) != _SUCCESS) r8712_free_recvframe(prframe, &precvpriv->free_recv_queue); } /* Update local variables. 
*/ bPktInBuf = false; } else { bPktInBuf = true; break; } } return bPktInBuf; } static int recv_indicatepkt_reorder(struct _adapter *padapter, union recv_frame *prframe) { unsigned long irql; struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl; struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; if (!pattrib->amsdu) { /* s1. */ r8712_wlanhdr_to_ethhdr(prframe); if (pattrib->qos != 1) { if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) { r8712_recv_indicatepkt(padapter, prframe); return _SUCCESS; } else return _FAIL; } } spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); /*s2. check if winstart_b(indicate_seq) needs to be updated*/ if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) goto _err_exit; /*s3. Insert all packet into Reorder Queue to maintain its ordering.*/ if (!enqueue_reorder_recvframe(preorder_ctrl, prframe)) goto _err_exit; /*s4. * Indication process. * After Packet dropping and Sliding Window shifting as above, we can * now just indicate the packets with the SeqNum smaller than latest * WinStart and buffer other packets. * * For Rx Reorder condition: * 1. All packets with SeqNum smaller than WinStart => Indicate * 2. All packets with SeqNum larger than or equal to * WinStart => Buffer it. 
*/ if (r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, false) == true) { mod_timer(&preorder_ctrl->reordering_ctrl_timer, jiffies + msecs_to_jiffies(REORDER_WAIT_TIME)); spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); } else { spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); del_timer(&preorder_ctrl->reordering_ctrl_timer); } return _SUCCESS; _err_exit: spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); return _FAIL; } void r8712_reordering_ctrl_timeout_handler(void *pcontext) { unsigned long irql; struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext; struct _adapter *padapter = preorder_ctrl->padapter; struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue; if (padapter->bDriverStopped || padapter->bSurpriseRemoved) return; spin_lock_irqsave(&ppending_recvframe_queue->lock, irql); r8712_recv_indicatepkts_in_order(padapter, preorder_ctrl, true); spin_unlock_irqrestore(&ppending_recvframe_queue->lock, irql); } static int r8712_process_recv_indicatepkts(struct _adapter *padapter, union recv_frame *prframe) { int retval = _SUCCESS; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; if (phtpriv->ht_option == 1) { /*B/G/N Mode*/ if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) { /* including perform A-MPDU Rx Ordering Buffer Control*/ if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) return _FAIL; } } else { /*B/G mode*/ retval = r8712_wlanhdr_to_ethhdr(prframe); if (retval != _SUCCESS) return retval; if ((padapter->bDriverStopped == false) && (padapter->bSurpriseRemoved == false)) { /* indicate this recv_frame */ r8712_recv_indicatepkt(padapter, prframe); } else return _FAIL; } return retval; } static u8 query_rx_pwr_percentage(s8 antpower) { if ((antpower <= -100) || (antpower >= 20)) return 0; else if (antpower >= 0) return 100; else return 100 + antpower; } static u8 
evm_db2percentage(s8 value) { /* * -33dB~0dB to 0%~99% */ s8 ret_val; ret_val = value; if (ret_val >= 0) ret_val = 0; if (ret_val <= -33) ret_val = -33; ret_val = -ret_val; ret_val *= 3; if (ret_val == 99) ret_val = 100; return ret_val; } s32 r8712_signal_scale_mapping(s32 cur_sig) { s32 ret_sig; if (cur_sig >= 51 && cur_sig <= 100) ret_sig = 100; else if (cur_sig >= 41 && cur_sig <= 50) ret_sig = 80 + ((cur_sig - 40) * 2); else if (cur_sig >= 31 && cur_sig <= 40) ret_sig = 66 + (cur_sig - 30); else if (cur_sig >= 21 && cur_sig <= 30) ret_sig = 54 + (cur_sig - 20); else if (cur_sig >= 10 && cur_sig <= 20) ret_sig = 42 + (((cur_sig - 10) * 2) / 3); else if (cur_sig >= 5 && cur_sig <= 9) ret_sig = 22 + (((cur_sig - 5) * 3) / 2); else if (cur_sig >= 1 && cur_sig <= 4) ret_sig = 6 + (((cur_sig - 1) * 3) / 2); else ret_sig = cur_sig; return ret_sig; } static s32 translate2dbm(struct _adapter *padapter, u8 signal_strength_idx) { s32 signal_power; /* in dBm.*/ /* Translate to dBm (x=0.5y-95).*/ signal_power = (s32)((signal_strength_idx + 1) >> 1); signal_power -= 95; return signal_power; } static void query_rx_phy_status(struct _adapter *padapter, union recv_frame *prframe) { u8 i, max_spatial_stream, evm; struct recv_stat *prxstat = (struct recv_stat *)prframe->u.hdr.rx_head; struct phy_stat *pphy_stat = (struct phy_stat *)(prxstat + 1); u8 *pphy_head = (u8 *)(prxstat + 1); s8 rx_pwr[4], rx_pwr_all; u8 pwdb_all; u32 rssi, total_rssi = 0; u8 bcck_rate = 0, rf_rx_num = 0, cck_highpwr = 0; struct phy_cck_rx_status *pcck_buf; u8 sq; /* Record it for next packet processing*/ bcck_rate = (prframe->u.hdr.attrib.mcs_rate <= 3 ? 
1 : 0); if (bcck_rate) { u8 report; /* CCK Driver info Structure is not the same as OFDM packet.*/ pcck_buf = (struct phy_cck_rx_status *)pphy_stat; /* (1)Hardware does not provide RSSI for CCK * (2)PWDB, Average PWDB cacluated by hardware * (for rate adaptive) */ if (!cck_highpwr) { report = pcck_buf->cck_agc_rpt & 0xc0; report >>= 6; switch (report) { /* Modify the RF RNA gain value to -40, -20, * -2, 14 by Jenyu's suggestion * Note: different RF with the different * RNA gain. */ case 0x3: rx_pwr_all = -40 - (pcck_buf->cck_agc_rpt & 0x3e); break; case 0x2: rx_pwr_all = -20 - (pcck_buf->cck_agc_rpt & 0x3e); break; case 0x1: rx_pwr_all = -2 - (pcck_buf->cck_agc_rpt & 0x3e); break; case 0x0: rx_pwr_all = 14 - (pcck_buf->cck_agc_rpt & 0x3e); break; } } else { report = ((u8)(le32_to_cpu(pphy_stat->phydw1) >> 8)) & 0x60; report >>= 5; switch (report) { case 0x3: rx_pwr_all = -40 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1); break; case 0x2: rx_pwr_all = -20 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1); break; case 0x1: rx_pwr_all = -2 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1); break; case 0x0: rx_pwr_all = 14 - ((pcck_buf->cck_agc_rpt & 0x1f) << 1); break; } } pwdb_all = query_rx_pwr_percentage(rx_pwr_all); /* CCK gain is smaller than OFDM/MCS gain,*/ /* so we add gain diff by experiences, the val is 6 */ pwdb_all += 6; if (pwdb_all > 100) pwdb_all = 100; /* modify the offset to make the same gain index with OFDM.*/ if (pwdb_all > 34 && pwdb_all <= 42) pwdb_all -= 2; else if (pwdb_all > 26 && pwdb_all <= 34) pwdb_all -= 6; else if (pwdb_all > 14 && pwdb_all <= 26) pwdb_all -= 8; else if (pwdb_all > 4 && pwdb_all <= 14) pwdb_all -= 4; /* * (3) Get Signal Quality (EVM) */ if (pwdb_all > 40) sq = 100; else { sq = pcck_buf->sq_rpt; if (pcck_buf->sq_rpt > 64) sq = 0; else if (pcck_buf->sq_rpt < 20) sq = 100; else sq = ((64-sq) * 100) / 44; } prframe->u.hdr.attrib.signal_qual = sq; prframe->u.hdr.attrib.rx_mimo_signal_qual[0] = sq; prframe->u.hdr.attrib.rx_mimo_signal_qual[1] = -1; } 
else { /* (1)Get RSSI for HT rate */ for (i = 0; i < ((padapter->registrypriv.rf_config) & 0x0f); i++) { rf_rx_num++; rx_pwr[i] = ((pphy_head[PHY_STAT_GAIN_TRSW_SHT + i] & 0x3F) * 2) - 110; /* Translate DBM to percentage. */ rssi = query_rx_pwr_percentage(rx_pwr[i]); total_rssi += rssi; } /* (2)PWDB, Average PWDB cacluated by hardware (for * rate adaptive) */ rx_pwr_all = (((pphy_head[PHY_STAT_PWDB_ALL_SHT]) >> 1) & 0x7f) - 106; pwdb_all = query_rx_pwr_percentage(rx_pwr_all); { /* (3)EVM of HT rate */ if (prframe->u.hdr.attrib.htc && prframe->u.hdr.attrib.mcs_rate >= 20 && prframe->u.hdr.attrib.mcs_rate <= 27) { /* both spatial stream make sense */ max_spatial_stream = 2; } else { /* only spatial stream 1 makes sense */ max_spatial_stream = 1; } for (i = 0; i < max_spatial_stream; i++) { evm = evm_db2percentage((pphy_head [PHY_STAT_RXEVM_SHT + i]));/*dbm*/ prframe->u.hdr.attrib.signal_qual = (u8)(evm & 0xff); prframe->u.hdr.attrib.rx_mimo_signal_qual[i] = (u8)(evm & 0xff); } } } /* UI BSS List signal strength(in percentage), make it good looking, * from 0~100. It is assigned to the BSS List in * GetValueFromBeaconOrProbeRsp(). */ if (bcck_rate) prframe->u.hdr.attrib.signal_strength = (u8)r8712_signal_scale_mapping(pwdb_all); else { if (rf_rx_num != 0) prframe->u.hdr.attrib.signal_strength = (u8)(r8712_signal_scale_mapping(total_rssi /= rf_rx_num)); } } static void process_link_qual(struct _adapter *padapter, union recv_frame *prframe) { u32 last_evm = 0, tmpVal; struct rx_pkt_attrib *pattrib; if (prframe == NULL || padapter == NULL) return; pattrib = &prframe->u.hdr.attrib; if (pattrib->signal_qual != 0) { /* * 1. Record the general EVM to the sliding window. 
*/ if (padapter->recvpriv.signal_qual_data.total_num++ >= PHY_LINKQUALITY_SLID_WIN_MAX) { padapter->recvpriv.signal_qual_data.total_num = PHY_LINKQUALITY_SLID_WIN_MAX; last_evm = padapter->recvpriv.signal_qual_data.elements [padapter->recvpriv.signal_qual_data.index]; padapter->recvpriv.signal_qual_data.total_val -= last_evm; } padapter->recvpriv.signal_qual_data.total_val += pattrib->signal_qual; padapter->recvpriv.signal_qual_data.elements[padapter-> recvpriv.signal_qual_data.index++] = pattrib->signal_qual; if (padapter->recvpriv.signal_qual_data.index >= PHY_LINKQUALITY_SLID_WIN_MAX) padapter->recvpriv.signal_qual_data.index = 0; /* <1> Showed on UI for user, in percentage. */ tmpVal = padapter->recvpriv.signal_qual_data.total_val / padapter->recvpriv.signal_qual_data.total_num; padapter->recvpriv.signal = (u8)tmpVal; } } static void process_rssi(struct _adapter *padapter, union recv_frame *prframe) { u32 last_rssi, tmp_val; struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; if (padapter->recvpriv.signal_strength_data.total_num++ >= PHY_RSSI_SLID_WIN_MAX) { padapter->recvpriv.signal_strength_data.total_num = PHY_RSSI_SLID_WIN_MAX; last_rssi = padapter->recvpriv.signal_strength_data.elements [padapter->recvpriv.signal_strength_data.index]; padapter->recvpriv.signal_strength_data.total_val -= last_rssi; } padapter->recvpriv.signal_strength_data.total_val += pattrib->signal_strength; padapter->recvpriv.signal_strength_data.elements[padapter->recvpriv. 
signal_strength_data.index++] = pattrib->signal_strength; if (padapter->recvpriv.signal_strength_data.index >= PHY_RSSI_SLID_WIN_MAX) padapter->recvpriv.signal_strength_data.index = 0; tmp_val = padapter->recvpriv.signal_strength_data.total_val / padapter->recvpriv.signal_strength_data.total_num; padapter->recvpriv.rssi = (s8)translate2dbm(padapter, (u8)tmp_val); } static void process_phy_info(struct _adapter *padapter, union recv_frame *prframe) { query_rx_phy_status(padapter, prframe); process_rssi(padapter, prframe); process_link_qual(padapter, prframe); } int recv_func(struct _adapter *padapter, void *pcontext) { struct rx_pkt_attrib *pattrib; union recv_frame *prframe, *orig_prframe; int retval = _SUCCESS; struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; prframe = (union recv_frame *)pcontext; orig_prframe = prframe; pattrib = &prframe->u.hdr.attrib; if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) { if (pattrib->crc_err == 1) padapter->mppriv.rx_crcerrpktcount++; else padapter->mppriv.rx_pktcount++; if (check_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE) == false) { /* free this recv_frame */ r8712_free_recvframe(orig_prframe, pfree_recv_queue); goto _exit_recv_func; } } /* check the frame crtl field and decache */ retval = r8712_validate_recv_frame(padapter, prframe); if (retval != _SUCCESS) { /* free this recv_frame */ r8712_free_recvframe(orig_prframe, pfree_recv_queue); goto _exit_recv_func; } process_phy_info(padapter, prframe); prframe = r8712_decryptor(padapter, prframe); if (prframe == NULL) { retval = _FAIL; goto _exit_recv_func; } prframe = r8712_recvframe_chk_defrag(padapter, prframe); if (prframe == NULL) goto _exit_recv_func; prframe = r8712_portctrl(padapter, prframe); if (prframe == NULL) { retval = _FAIL; goto _exit_recv_func; } retval = r8712_process_recv_indicatepkts(padapter, prframe); if (retval != _SUCCESS) { r8712_free_recvframe(orig_prframe, pfree_recv_queue); goto 
_exit_recv_func; } _exit_recv_func: return retval; } static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb) { u8 *pbuf, shift_sz = 0; u8 frag, mf; uint pkt_len; u32 transfer_len; struct recv_stat *prxstat; u16 pkt_cnt, drvinfo_sz, pkt_offset, tmp_len, alloc_sz; struct __queue *pfree_recv_queue; _pkt *pkt_copy = NULL; union recv_frame *precvframe = NULL; struct recv_priv *precvpriv = &padapter->recvpriv; pfree_recv_queue = &(precvpriv->free_recv_queue); pbuf = pskb->data; prxstat = (struct recv_stat *)pbuf; pkt_cnt = (le32_to_cpu(prxstat->rxdw2)>>16)&0xff; pkt_len = le32_to_cpu(prxstat->rxdw0)&0x00003fff; transfer_len = pskb->len; /* Test throughput with Netgear 3700 (No security) with Chariot 3T3R * pairs. The packet count will be a big number so that the containing * packet will effect the Rx reordering. */ if (transfer_len < pkt_len) { /* In this case, it means the MAX_RECVBUF_SZ is too small to * get the data from 8712u. */ return _FAIL; } do { prxstat = (struct recv_stat *)pbuf; pkt_len = le32_to_cpu(prxstat->rxdw0)&0x00003fff; /* more fragment bit */ mf = (le32_to_cpu(prxstat->rxdw1) >> 27) & 0x1; /* ragmentation number */ frag = (le32_to_cpu(prxstat->rxdw2) >> 12) & 0xf; /* uint 2^3 = 8 bytes */ drvinfo_sz = (le32_to_cpu(prxstat->rxdw0) & 0x000f0000) >> 16; drvinfo_sz <<= 3; if (pkt_len <= 0) goto _exit_recvbuf2recvframe; /* Qos data, wireless lan header length is 26 */ if ((le32_to_cpu(prxstat->rxdw0) >> 23) & 0x01) shift_sz = 2; precvframe = r8712_alloc_recvframe(pfree_recv_queue); if (precvframe == NULL) goto _exit_recvbuf2recvframe; INIT_LIST_HEAD(&precvframe->u.hdr.list); precvframe->u.hdr.precvbuf = NULL; /*can't access the precvbuf*/ precvframe->u.hdr.len = 0; tmp_len = pkt_len + drvinfo_sz + RXDESC_SIZE; pkt_offset = (u16)round_up(tmp_len, 128); /* for first fragment packet, driver need allocate 1536 + * drvinfo_sz + RXDESC_SIZE to defrag packet. 
*/ if ((mf == 1) && (frag == 0)) /*1658+6=1664, 1664 is 128 alignment.*/ alloc_sz = max_t(u16, tmp_len, 1658); else alloc_sz = tmp_len; /* 2 is for IP header 4 bytes alignment in QoS packet case. * 4 is for skb->data 4 bytes alignment. */ alloc_sz += 6; pkt_copy = netdev_alloc_skb(padapter->pnetdev, alloc_sz); if (pkt_copy) { precvframe->u.hdr.pkt = pkt_copy; skb_reserve(pkt_copy, 4 - ((addr_t)(pkt_copy->data) % 4)); skb_reserve(pkt_copy, shift_sz); memcpy(pkt_copy->data, pbuf, tmp_len); precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data; precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz; } else { precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC); if (!precvframe->u.hdr.pkt) return _FAIL; precvframe->u.hdr.rx_head = pbuf; precvframe->u.hdr.rx_data = pbuf; precvframe->u.hdr.rx_tail = pbuf; precvframe->u.hdr.rx_end = pbuf + alloc_sz; } recvframe_put(precvframe, tmp_len); recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE); /* because the endian issue, driver avoid reference to the * rxstat after calling update_recvframe_attrib_from_recvstat(); */ update_recvframe_attrib_from_recvstat(&precvframe->u.hdr.attrib, prxstat); r8712_recv_entry(precvframe); transfer_len -= pkt_offset; pbuf += pkt_offset; pkt_cnt--; precvframe = NULL; pkt_copy = NULL; } while ((transfer_len > 0) && pkt_cnt > 0); _exit_recvbuf2recvframe: return _SUCCESS; } static void recv_tasklet(void *priv) { struct sk_buff *pskb; struct _adapter *padapter = (struct _adapter *)priv; struct recv_priv *precvpriv = &padapter->recvpriv; while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) { recvbuf2recvframe(padapter, pskb); skb_reset_tail_pointer(pskb); pskb->len = 0; if (!skb_cloned(pskb)) skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb); else consume_skb(pskb); } }
gpl-2.0
simo97/linux
drivers/staging/rtl8192e/rtl819x_TSProc.c
221
15036
/****************************************************************************** * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> ******************************************************************************/ #include "rtllib.h" #include <linux/etherdevice.h> #include "rtl819x_TS.h" static void TsSetupTimeOut(unsigned long data) { } static void TsInactTimeout(unsigned long data) { } static void RxPktPendingTimeout(unsigned long data) { struct rx_ts_record *pRxTs = (struct rx_ts_record *)data; struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device, RxTsRecord[pRxTs->num]); struct rx_reorder_entry *pReorderEntry = NULL; unsigned long flags = 0; u8 index = 0; bool bPktInBuf = false; spin_lock_irqsave(&(ieee->reorder_spinlock), flags); if (pRxTs->RxTimeoutIndicateSeq != 0xffff) { while (!list_empty(&pRxTs->RxPendingPktList)) { pReorderEntry = (struct rx_reorder_entry *) list_entry(pRxTs->RxPendingPktList.prev, struct rx_reorder_entry, List); if (index == 0) pRxTs->RxIndicateSeq = pReorderEntry->SeqNum; if (SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) || SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)) { list_del_init(&pReorderEntry->List); if (SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)) pRxTs->RxIndicateSeq = (pRxTs->RxIndicateSeq + 1) % 4096; netdev_dbg(ieee->dev, "%s(): Indicate SeqNum: %d\n", __func__, 
pReorderEntry->SeqNum); ieee->stats_IndicateArray[index] = pReorderEntry->prxb; index++; list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List); } else { bPktInBuf = true; break; } } } if (index > 0) { pRxTs->RxTimeoutIndicateSeq = 0xffff; if (index > REORDER_WIN_SIZE) { netdev_warn(ieee->dev, "%s(): Rx Reorder struct buffer full\n", __func__); spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); return; } rtllib_indicate_packets(ieee, ieee->stats_IndicateArray, index); bPktInBuf = false; } if (bPktInBuf && (pRxTs->RxTimeoutIndicateSeq == 0xffff)) { pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq; mod_timer(&pRxTs->RxPktPendingTimer, jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime) ); } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); } static void TsAddBaProcess(unsigned long data) { struct tx_ts_record *pTxTs = (struct tx_ts_record *)data; u8 num = pTxTs->num; struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device, TxTsRecord[num]); TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false); netdev_dbg(ieee->dev, "%s(): ADDBA Req is started\n", __func__); } static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo) { eth_zero_addr(pTsCommonInfo->Addr); memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body)); memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas)*TCLAS_NUM); pTsCommonInfo->TClasProc = 0; pTsCommonInfo->TClasNum = 0; } static void ResetTxTsEntry(struct tx_ts_record *pTS) { ResetTsCommonInfo(&pTS->TsCommonInfo); pTS->TxCurSeq = 0; pTS->bAddBaReqInProgress = false; pTS->bAddBaReqDelayed = false; pTS->bUsingBa = false; pTS->bDisable_AddBa = false; ResetBaEntry(&pTS->TxAdmittedBARecord); ResetBaEntry(&pTS->TxPendingBARecord); } static void ResetRxTsEntry(struct rx_ts_record *pTS) { ResetTsCommonInfo(&pTS->TsCommonInfo); pTS->RxIndicateSeq = 0xffff; pTS->RxTimeoutIndicateSeq = 0xffff; ResetBaEntry(&pTS->RxAdmittedBARecord); } void TSInitialize(struct rtllib_device *ieee) { struct 
tx_ts_record *pTxTS = ieee->TxTsRecord; struct rx_ts_record *pRxTS = ieee->RxTsRecord; struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry; u8 count = 0; netdev_vdbg(ieee->dev, "%s()\n", __func__); INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List); INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List); INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List); for (count = 0; count < TOTAL_TS_NUM; count++) { pTxTS->num = count; setup_timer(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut, (unsigned long) pTxTS); setup_timer(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout, (unsigned long) pTxTS); setup_timer(&pTxTS->TsAddBaTimer, TsAddBaProcess, (unsigned long) pTxTS); setup_timer(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut, (unsigned long) pTxTS); setup_timer(&pTxTS->TxAdmittedBARecord.Timer, TxBaInactTimeout, (unsigned long) pTxTS); ResetTxTsEntry(pTxTS); list_add_tail(&pTxTS->TsCommonInfo.List, &ieee->Tx_TS_Unused_List); pTxTS++; } INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List); INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List); INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List); for (count = 0; count < TOTAL_TS_NUM; count++) { pRxTS->num = count; INIT_LIST_HEAD(&pRxTS->RxPendingPktList); setup_timer(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut, (unsigned long) pRxTS); setup_timer(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout, (unsigned long) pRxTS); setup_timer(&pRxTS->RxAdmittedBARecord.Timer, RxBaInactTimeout, (unsigned long) pRxTS); setup_timer(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout, (unsigned long) pRxTS); ResetRxTsEntry(pRxTS); list_add_tail(&pRxTS->TsCommonInfo.List, &ieee->Rx_TS_Unused_List); pRxTS++; } INIT_LIST_HEAD(&ieee->RxReorder_Unused_List); for (count = 0; count < REORDER_ENTRY_NUM; count++) { list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List); if (count == (REORDER_ENTRY_NUM-1)) break; pRxReorderEntry = &ieee->RxReorderEntry[count+1]; } } static void AdmitTS(struct rtllib_device *ieee, struct ts_common_info *pTsCommonInfo, u32 InactTime) { 
del_timer_sync(&pTsCommonInfo->SetupTimer); del_timer_sync(&pTsCommonInfo->InactTimer); if (InactTime != 0) mod_timer(&pTsCommonInfo->InactTimer, jiffies + msecs_to_jiffies(InactTime)); } static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee, u8 *Addr, u8 TID, enum tr_select TxRxSelect) { u8 dir; bool search_dir[4] = {0}; struct list_head *psearch_list; struct ts_common_info *pRet = NULL; if (ieee->iw_mode == IW_MODE_MASTER) { if (TxRxSelect == TX_DIR) { search_dir[DIR_DOWN] = true; search_dir[DIR_BI_DIR] = true; } else { search_dir[DIR_UP] = true; search_dir[DIR_BI_DIR] = true; } } else if (ieee->iw_mode == IW_MODE_ADHOC) { if (TxRxSelect == TX_DIR) search_dir[DIR_UP] = true; else search_dir[DIR_DOWN] = true; } else { if (TxRxSelect == TX_DIR) { search_dir[DIR_UP] = true; search_dir[DIR_BI_DIR] = true; search_dir[DIR_DIRECT] = true; } else { search_dir[DIR_DOWN] = true; search_dir[DIR_BI_DIR] = true; search_dir[DIR_DIRECT] = true; } } if (TxRxSelect == TX_DIR) psearch_list = &ieee->Tx_TS_Admit_List; else psearch_list = &ieee->Rx_TS_Admit_List; for (dir = 0; dir <= DIR_BI_DIR; dir++) { if (!search_dir[dir]) continue; list_for_each_entry(pRet, psearch_list, List) { if (memcmp(pRet->Addr, Addr, 6) == 0 && pRet->TSpec.f.TSInfo.field.ucTSID == TID && pRet->TSpec.f.TSInfo.field.ucDirection == dir) break; } if (&pRet->List != psearch_list) break; } if (pRet && &pRet->List != psearch_list) return pRet; return NULL; } static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr, union tspec_body *pTSPEC, union qos_tclas *pTCLAS, u8 TCLAS_Num, u8 TCLAS_Proc) { u8 count; if (pTsCommonInfo == NULL) return; memcpy(pTsCommonInfo->Addr, Addr, 6); if (pTSPEC != NULL) memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC, sizeof(union tspec_body)); for (count = 0; count < TCLAS_Num; count++) memcpy((u8 *)(&(pTsCommonInfo->TClass[count])), (u8 *)pTCLAS, sizeof(union qos_tclas)); pTsCommonInfo->TClasProc = TCLAS_Proc; pTsCommonInfo->TClasNum = 
TCLAS_Num; } bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) { u8 UP = 0; union tspec_body TSpec; union qos_tsinfo *pTSInfo = &TSpec.f.TSInfo; struct list_head *pUnusedList; struct list_head *pAddmitList; enum direction_value Dir; if (is_multicast_ether_addr(Addr)) { netdev_warn(ieee->dev, "Get TS for Broadcast or Multicast\n"); return false; } if (ieee->current_network.qos_data.supported == 0) { UP = 0; } else { if (!IsACValid(TID)) { netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n", __func__, TID); return false; } switch (TID) { case 0: case 3: UP = 0; break; case 1: case 2: UP = 2; break; case 4: case 5: UP = 5; break; case 6: case 7: UP = 7; break; } } *ppTS = SearchAdmitTRStream(ieee, Addr, UP, TxRxSelect); if (*ppTS != NULL) return true; if (!bAddNewTs) { netdev_dbg(ieee->dev, "add new TS failed(tid:%d)\n", UP); return false; } pUnusedList = (TxRxSelect == TX_DIR) ? (&ieee->Tx_TS_Unused_List) : (&ieee->Rx_TS_Unused_List); pAddmitList = (TxRxSelect == TX_DIR) ? (&ieee->Tx_TS_Admit_List) : (&ieee->Rx_TS_Admit_List); Dir = (ieee->iw_mode == IW_MODE_MASTER) ? ((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) : ((TxRxSelect == TX_DIR) ? 
DIR_UP : DIR_DOWN);
	/*
	 * NOTE(review): the lines above are the tail of GetTs(); its head
	 * lies outside this chunk.  Code below is unchanged, comments only.
	 *
	 * Recycle a TS record from the unused list: reset it, fill in the
	 * TSPEC TS-Info fields from the requested UP/direction, register
	 * the entry and move it onto the admitted list.
	 */
	if (!list_empty(pUnusedList)) {
		(*ppTS) = list_entry(pUnusedList->next,
				     struct ts_common_info, List);
		list_del_init(&(*ppTS)->List);
		/* ts_common_info is embedded in both record types; reset the
		 * full enclosing record for the requested direction. */
		if (TxRxSelect == TX_DIR) {
			struct tx_ts_record *tmp =
				container_of(*ppTS,
					     struct tx_ts_record,
					     TsCommonInfo);
			ResetTxTsEntry(tmp);
		} else {
			struct rx_ts_record *tmp =
				container_of(*ppTS,
					     struct rx_ts_record,
					     TsCommonInfo);
			ResetRxTsEntry(tmp);
		}

		netdev_dbg(ieee->dev,
			   "to init current TS, UP:%d, Dir:%d, addr: %pM ppTs=%p\n",
			   UP, Dir, Addr, *ppTS);

		/* Build the TS-Info field of the TSPEC (802.11e):
		 * prioritized EDCA traffic (AccessPolicy=1), TSID taken
		 * from the user priority, no aggregation/APSD/schedule. */
		pTSInfo->field.ucTrafficType = 0;
		pTSInfo->field.ucTSID = UP;
		pTSInfo->field.ucDirection = Dir;
		pTSInfo->field.ucAccessPolicy = 1;
		pTSInfo->field.ucAggregation = 0;
		pTSInfo->field.ucPSB = 0;
		pTSInfo->field.ucUP = UP;
		pTSInfo->field.ucTSInfoAckPolicy = 0;
		pTSInfo->field.ucSchedule = 0;

		MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
		AdmitTS(ieee, *ppTS, 0);
		list_add_tail(&((*ppTS)->List), pAddmitList);

		return true;
	}

	netdev_warn(ieee->dev,
		    "There is not enough dir=%d(0=up down=1) TS record to be used!",
		    Dir);
	return false;
}

/*
 * Tear down one TS entry: stop its timers, send DELBA, and for the RX
 * direction free every frame still queued on the reorder-pending list.
 */
static void RemoveTsEntry(struct rtllib_device *ieee, struct ts_common_info *pTs,
			  enum tr_select TxRxSelect)
{
	del_timer_sync(&pTs->SetupTimer);
	del_timer_sync(&pTs->InactTimer);
	TsInitDelBA(ieee, pTs, TxRxSelect);

	if (TxRxSelect == RX_DIR) {
		struct rx_reorder_entry *pRxReorderEntry;
		struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;

		if (timer_pending(&pRxTS->RxPktPendingTimer))
			del_timer_sync(&pRxTS->RxPktPendingTimer);

		/* Drain from the tail so list order is preserved while
		 * entries are recycled onto the unused list. */
		while (!list_empty(&pRxTS->RxPendingPktList)) {
			pRxReorderEntry = (struct rx_reorder_entry *)
					list_entry(pRxTS->RxPendingPktList.prev,
					struct rx_reorder_entry, List);
			netdev_dbg(ieee->dev, "%s(): Delete SeqNum %d!\n",
				   __func__, pRxReorderEntry->SeqNum);
			list_del_init(&pRxReorderEntry->List);
			{
				int i = 0;
				struct rtllib_rxb *prxb = pRxReorderEntry->prxb;

				/* NOTE(review): returning here leaves
				 * pRxReorderEntry off both lists (it was just
				 * unlinked and is never recycled) -- looks
				 * like a leak on this defensive path; confirm
				 * against upstream rtllib before changing. */
				if (unlikely(!prxb))
					return;
				for (i = 0; i < prxb->nr_subframes; i++)
					dev_kfree_skb(prxb->subframes[i]);
				kfree(prxb);
				prxb = NULL;
			}
			list_add_tail(&pRxReorderEntry->List,
				      &ieee->RxReorder_Unused_List);
		}
	} else {
		struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;

		del_timer_sync(&pTxTS->TsAddBaTimer);
	}
}

/*
 * Remove every TS (TX and RX, pending and admitted) established with the
 * peer at MAC address @Addr and return the records to the unused lists.
 */
void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
{
	struct ts_common_info *pTS, *pTmpTS;

	netdev_info(ieee->dev, "===========>RemovePeerTS, %pM\n", Addr);

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
		if (memcmp(pTS->Addr, Addr, 6) == 0) {
			RemoveTsEntry(ieee, pTS, TX_DIR);
			list_del_init(&pTS->List);
			list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
		}
	}

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
		if (memcmp(pTS->Addr, Addr, 6) == 0) {
			netdev_info(ieee->dev,
				    "====>remove Tx_TS_admin_list\n");
			RemoveTsEntry(ieee, pTS, TX_DIR);
			list_del_init(&pTS->List);
			list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
		}
	}

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
		if (memcmp(pTS->Addr, Addr, 6) == 0) {
			RemoveTsEntry(ieee, pTS, RX_DIR);
			list_del_init(&pTS->List);
			list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
		}
	}

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
		if (memcmp(pTS->Addr, Addr, 6) == 0) {
			RemoveTsEntry(ieee, pTS, RX_DIR);
			list_del_init(&pTS->List);
			list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
		}
	}
}
EXPORT_SYMBOL(RemovePeerTS);

/*
 * Tear down every TS on all four active lists (used e.g. on interface
 * shutdown) and recycle the records onto the unused lists.
 */
void RemoveAllTS(struct rtllib_device *ieee)
{
	struct ts_common_info *pTS, *pTmpTS;

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
		RemoveTsEntry(ieee, pTS, TX_DIR);
		list_del_init(&pTS->List);
		list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
	}

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
		RemoveTsEntry(ieee, pTS, TX_DIR);
		list_del_init(&pTS->List);
		list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
	}

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
		RemoveTsEntry(ieee, pTS, RX_DIR);
		list_del_init(&pTS->List);
		list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
	}

	list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
		RemoveTsEntry(ieee, pTS, RX_DIR);
		list_del_init(&pTS->List);
		list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
	}
}

/*
 * Kick off the ADDBA handshake for a TX TS unless one is already in
 * flight.  A previously delayed request is retried after TS_ADDBA_DELAY
 * ms; otherwise the BA timer fires almost immediately (10 jiffies).
 */
void TsStartAddBaProcess(struct rtllib_device *ieee, struct tx_ts_record *pTxTS)
{
	if (pTxTS->bAddBaReqInProgress == false) {
		pTxTS->bAddBaReqInProgress = true;

		if (pTxTS->bAddBaReqDelayed) {
			netdev_dbg(ieee->dev, "Start ADDBA after 60 sec!!\n");
			mod_timer(&pTxTS->TsAddBaTimer, jiffies +
				  msecs_to_jiffies(TS_ADDBA_DELAY));
		} else {
			netdev_dbg(ieee->dev, "Immediately Start ADDBA\n");
			mod_timer(&pTxTS->TsAddBaTimer, jiffies+10);
		}
	} else
		netdev_dbg(ieee->dev, "BA timer is already added\n");
}
gpl-2.0
wendal/rk2918_uzone_f0_top
arch/mips/sni/setup.c
733
5661
/*
 * Setup pointers to hardware-dependent routines.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 98, 2000, 03, 04, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 */
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/screen_info.h>

#ifdef CONFIG_ARC
#include <asm/fw/arc/types.h>
#include <asm/sgialib.h>
#endif
#ifdef CONFIG_SNIPROM
#include <asm/mipsprom.h>
#endif
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/reboot.h>
#include <asm/sni.h>

unsigned int sni_brd_type;
EXPORT_SYMBOL(sni_brd_type);

extern void sni_machine_restart(char *command);
extern void sni_machine_power_off(void);

/*
 * Seed the VGA console's screen_info from the ARC firmware display
 * status so the console continues where the firmware left off.
 * Compiled out unless VT + VGA console + ARC firmware are configured.
 */
static void __init sni_display_setup(void)
{
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_ARC)
	struct screen_info *si = &screen_info;
	DISPLAY_STATUS *di;

	di = ArcGetDisplayStatus(1);

	if (di) {
		si->orig_x = di->CursorXPosition;
		si->orig_y = di->CursorYPosition;
		si->orig_video_cols = di->CursorMaxXPosition;
		si->orig_video_lines = di->CursorMaxYPosition;
		si->orig_video_isVGA = VIDEO_TYPE_VGAC;
		si->orig_video_points = 16;
	}
#endif
}

/*
 * Register the serial console selected by the SNI PROM environment
 * ("console_dev" chooses the device, "console" picks local/remote port,
 * "lbaud"/"rbaud" supply the baud-rate option string).
 *
 * Fixes over the previous version: prom_getenv() returns NULL for unset
 * variables, so "console_dev" and "console" are now checked before being
 * dereferenced, and the baud string is copied with a bound instead of an
 * unbounded strcpy() into the 8-byte options buffer.
 */
static void __init sni_console_setup(void)
{
#ifndef CONFIG_ARC
	char *ctype;
	char *cdev;
	char *baud;
	int port;
	static char options[8];

	cdev = prom_getenv("console_dev");
	if (cdev && strncmp(cdev, "tty", 3) == 0) {
		ctype = prom_getenv("console");
		/* Unset "console" behaves like the default: local port */
		switch (ctype ? *ctype : 'l') {
		default:
		case 'l':
			port = 0;
			baud = prom_getenv("lbaud");
			break;
		case 'r':
			port = 1;
			baud = prom_getenv("rbaud");
			break;
		}
		if (baud)
			/* bounded, always NUL-terminated copy; a too-long
			 * PROM value is truncated rather than overflowing */
			strlcpy(options, baud, sizeof(options));
		if (strncmp(cdev, "tty552", 6) == 0)
			add_preferred_console("ttyS", port,
					      baud ? options : NULL);
		else
			add_preferred_console("ttySC", port,
					      baud ? options : NULL);
	}
#endif
}

#ifdef DEBUG
/* Hex-dump the 256-byte SNI ID PROM, 16 bytes per row, for bring-up. */
static void __init sni_idprom_dump(void)
{
	int i;

	pr_debug("SNI IDProm dump:\n");
	for (i = 0; i < 256; i++) {
		if (i%16 == 0)
			pr_debug("%04x ", i);

		printk("%02x ", *(unsigned char *) (SNI_IDPROM_BASE + i));

		if (i % 16 == 15)
			printk("\n");
	}
}
#endif

/*
 * Board-level setup: read board and CPU type from the ID PROM, derive
 * the marketing model name, dispatch to the matching board init routine
 * and hook up restart/power-off and console handling.
 */
void __init plat_mem_setup(void)
{
	int cputype;

	set_io_port_base(SNI_PORT_BASE);
//	ioport_resource.end = sni_io_resource.end;

	/*
	 * Setup (E)ISA I/O memory access stuff
	 */
#ifdef CONFIG_EISA
	EISA_bus = 1;
#endif

	sni_brd_type = *(unsigned char *)SNI_IDPROM_BRDTYPE;
	cputype = *(unsigned char *)SNI_IDPROM_CPUTYPE;
	switch (sni_brd_type) {
	case SNI_BRD_TOWER_OASIC:
		switch (cputype) {
		case SNI_CPU_M8030:
			system_type = "RM400-330";
			break;
		case SNI_CPU_M8031:
			system_type = "RM400-430";
			break;
		case SNI_CPU_M8037:
			system_type = "RM400-530";
			break;
		case SNI_CPU_M8034:
			system_type = "RM400-730";
			break;
		default:
			system_type = "RM400-xxx";
			break;
		}
		break;
	case SNI_BRD_MINITOWER:
		switch (cputype) {
		case SNI_CPU_M8021:
		case SNI_CPU_M8043:
			system_type = "RM400-120";
			break;
		case SNI_CPU_M8040:
			system_type = "RM400-220";
			break;
		case SNI_CPU_M8053:
			system_type = "RM400-225";
			break;
		case SNI_CPU_M8050:
			system_type = "RM400-420";
			break;
		default:
			system_type = "RM400-xxx";
			break;
		}
		break;
	case SNI_BRD_PCI_TOWER:
		system_type = "RM400-Cxx";
		break;
	case SNI_BRD_RM200:
		system_type = "RM200-xxx";
		break;
	case SNI_BRD_PCI_MTOWER:
		system_type = "RM300-Cxx";
		break;
	case SNI_BRD_PCI_DESKTOP:
		/* desktop model is distinguished by the CPU revision */
		switch (read_c0_prid() & 0xff00) {
		case PRID_IMP_R4600:
		case PRID_IMP_R4700:
			system_type = "RM200-C20";
			break;
		case PRID_IMP_R5000:
			system_type = "RM200-C40";
			break;
		default:
			system_type = "RM200-Cxx";
			break;
		}
		break;
	case SNI_BRD_PCI_TOWER_CPLUS:
		system_type = "RM400-Exx";
		break;
	case SNI_BRD_PCI_MTOWER_CPLUS:
		system_type = "RM300-Exx";
		break;
	}
	pr_debug("Found SNI brdtype %02x name %s\n", sni_brd_type, system_type);

#ifdef DEBUG
	sni_idprom_dump();
#endif

	switch (sni_brd_type) {
	case SNI_BRD_10:
	case SNI_BRD_10NEW:
	case SNI_BRD_TOWER_OASIC:
	case SNI_BRD_MINITOWER:
		sni_a20r_init();
		break;

	case SNI_BRD_PCI_TOWER:
	case SNI_BRD_PCI_TOWER_CPLUS:
		sni_pcit_init();
		break;

	case SNI_BRD_RM200:
		sni_rm200_init();
		break;

	case SNI_BRD_PCI_MTOWER:
	case SNI_BRD_PCI_DESKTOP:
	case SNI_BRD_PCI_MTOWER_CPLUS:
		sni_pcimt_init();
		break;
	}

	_machine_restart = sni_machine_restart;
	pm_power_off = sni_machine_power_off;

	sni_display_setup();
	sni_console_setup();
}

#ifdef CONFIG_PCI

#include <linux/pci.h>
#include <video/vga.h>
#include <video/cirrus.h>

static void __devinit quirk_cirrus_ram_size(struct pci_dev *dev)
{
	u16 cmd;

	/*
	 * firmware doesn't set the ram size correct, so we
	 * need to do it here, otherwise we get screen corruption
	 * on older Cirrus chips
	 */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY))
		== (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
		vga_wseq(NULL, CL_SEQR6, 0x12);	/* unlock all extension registers */
		vga_wseq(NULL, CL_SEQRF, 0x18);
	}
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8,
			quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436,
			quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
			quirk_cirrus_ram_size);
#endif
gpl-2.0
NeptunIDE/linux
arch/mips/lasat/prom.c
733
2800
/*
 * PROM interface routines.
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <asm/bootinfo.h>
#include <asm/lasat/lasat.h>
#include <asm/cpu.h>

#include "at93c.h"
#include <asm/lasat/eeprom.h>
#include "prom.h"

/* The bootloader exports a jump table at RESET_VECTOR + 0x20; each
 * 32-bit slot holds the address of one firmware service routine. */
#define RESET_VECTOR 0xbfc00000
#define PROM_JUMP_TABLE_ENTRY(n) (*((u32 *)(RESET_VECTOR + 0x20) + n))
#define PROM_DISPLAY_ADDR PROM_JUMP_TABLE_ENTRY(0)
#define PROM_PUTC_ADDR PROM_JUMP_TABLE_ENTRY(1)
#define PROM_MONITOR_ADDR PROM_JUMP_TABLE_ENTRY(2)

/* No-op fallbacks used when the bootloader is too old to provide the
 * real vectors (see setup_prom_vectors below). */
static void null_prom_display(const char *string, int pos, int clear)
{
}

static void null_prom_monitor(void)
{
}

static void null_prom_putc(char c)
{
}

/* these are functions provided by the bootloader */
static void (*__prom_putc)(char c) = null_prom_putc;

/* Early-console character output; routed to the bootloader's putc. */
void prom_putchar(char c)
{
	__prom_putc(c);
}

void (*prom_display)(const char *string, int pos, int clear) =
		null_prom_display;
void (*prom_monitor)(void) = null_prom_monitor;

unsigned int lasat_ndelay_divider;

/*
 * Point the prom_* function pointers at the bootloader's jump table,
 * but only when the firmware version word (at RESET_VECTOR + 0x90)
 * reports a release new enough (>= 307) to provide valid entries.
 */
static void setup_prom_vectors(void)
{
	u32 version = *(u32 *)(RESET_VECTOR + 0x90);

	if (version >= 307) {
		prom_display = (void *)PROM_DISPLAY_ADDR;
		__prom_putc = (void *)PROM_PUTC_ADDR;
		prom_monitor = (void *)PROM_MONITOR_ADDR;
	}
	printk(KERN_DEBUG "prom vectors set up\n");
}

/* Per-machine-type AT93C EEPROM wiring: [0] = LASAT 100, [1] = LASAT 200
 * (indexing below in prom_init relies on this order). */
static struct at93c_defs at93c_defs[N_MACHTYPES] = {
	{
		.reg = (void *)AT93C_REG_100,
		.rdata_reg = (void *)AT93C_RDATA_REG_100,
		.rdata_shift = AT93C_RDATA_SHIFT_100,
		.wdata_shift = AT93C_WDATA_SHIFT_100,
		.cs = AT93C_CS_M_100,
		.clk = AT93C_CLK_M_100
	}, {
		.reg = (void *)AT93C_REG_200,
		.rdata_reg = (void *)AT93C_RDATA_REG_200,
		.rdata_shift = AT93C_RDATA_SHIFT_200,
		.wdata_shift = AT93C_WDATA_SHIFT_200,
		.cs = AT93C_CS_M_200,
		.clk = AT93C_CLK_M_200
	},
};

/*
 * Early platform init: detect board variant, hook up firmware vectors
 * and the EEPROM, capture the bootloader command line, and register
 * I/O and RAM regions.  fw_arg0/fw_arg1 carry the bootloader's
 * argc/argv pair.
 */
void __init prom_init(void)
{
	int argc = fw_arg0;
	char **argv = (char **) fw_arg1;

	setup_prom_vectors();

	if (IS_LASAT_200()) {
		printk(KERN_INFO "LASAT 200 board\n");
		lasat_ndelay_divider = LASAT_200_DIVIDER;
		at93c = &at93c_defs[1];
	} else {
		printk(KERN_INFO "LASAT 100 board\n");
		lasat_ndelay_divider = LASAT_100_DIVIDER;
		at93c = &at93c_defs[0];
	}

	lasat_init_board_info();	/* Read info from EEPROM */

	/* Get the command line (argv[0] holds the whole line here;
	 * bounded copy with explicit NUL termination) */
	if (argc > 0) {
		strncpy(arcs_cmdline, argv[0], CL_SIZE-1);
		arcs_cmdline[CL_SIZE-1] = '\0';
	}

	/* Set the I/O base address */
	set_io_port_base(KSEG1);

	/* Set memory regions */
	ioport_resource.start = 0;
	ioport_resource.end = 0xffffffff;	/* Wrong, fixme. */

	add_memory_region(0, lasat_board_info.li_memsize, BOOT_MEM_RAM);
}

/* Nothing to release: no PROM memory is reclaimed on this platform. */
void __init prom_free_prom_memory(void)
{
}

/* Model string read out of the EEPROM by lasat_init_board_info(). */
const char *get_system_type(void)
{
	return lasat_board_info.li_bmstr;
}
gpl-2.0
Ander-Alvarez/CoffeeKernel
fs/romfs/super.c
1757
15504
/* Block- or MTD-based romfs * * Copyright © 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * Derived from: ROMFS file system, Linux implementation * * Copyright © 1997-1999 Janos Farkas <chexum@shadow.banki.hu> * * Using parts of the minix filesystem * Copyright © 1991, 1992 Linus Torvalds * * and parts of the affs filesystem additionally * Copyright © 1993 Ray Burr * Copyright © 1996 Hans-Joachim Widmaier * * Changes * Changed for 2.1.19 modules * Jan 1997 Initial release * Jun 1997 2.1.43+ changes * Proper page locking in readpage * Changed to work with 2.1.45+ fs * Jul 1997 Fixed follow_link * 2.1.47 * lookup shouldn't return -ENOENT * from Horst von Brand: * fail on wrong checksum * double unlock_super was possible * correct namelen for statfs * spotted by Bill Hawes: * readlink shouldn't iput() * Jun 1998 2.1.106 from Avery Pennarun: glibc scandir() * exposed a problem in readdir * 2.1.107 code-freeze spellchecker run * Aug 1998 2.1.118+ VFS changes * Sep 1998 2.1.122 another VFS change (follow_link) * Apr 1999 2.2.7 no more EBADF checking in * lookup/readdir, use ERR_PTR * Jun 1999 2.3.6 d_alloc_root use changed * 2.3.9 clean up usage of ENOENT/negative * dentries in lookup * clean up page flags setting * (error, uptodate, locking) in * in readpage * use init_special_inode for * fifos/sockets (and streamline) in * read_inode, fix _ops table order * Aug 1999 2.3.16 __initfunc() => __init change * Oct 1999 2.3.24 page->owner hack obsoleted * Nov 1999 2.3.27 2.3.25+ page->offset => index change * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. 
*/

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/mtd/super.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include "internal.h"

static struct kmem_cache *romfs_inode_cachep;

/* Map the on-disk ROMFH_TYPE (low 3 bits of the 'next' word) to a
 * default mode; index order matches the romfs type encoding. */
static const umode_t romfs_modemap[8] = {
	0,			/* hard link */
	S_IFDIR  | 0644,	/* directory */
	S_IFREG  | 0644,	/* regular file */
	S_IFLNK  | 0777,	/* symlink */
	S_IFBLK  | 0600,	/* blockdev */
	S_IFCHR  | 0600,	/* chardev */
	S_IFSOCK | 0644,	/* socket */
	S_IFIFO  | 0644		/* FIFO */
};

/* Same index order as above, mapping ROMFH_TYPE to readdir d_type. */
static const unsigned char romfs_dtype_table[] = {
	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_SOCK, DT_FIFO
};

static struct inode *romfs_iget(struct super_block *sb, unsigned long pos);

/*
 * read a page worth of data from the image
 */
static int romfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t offset, size;
	unsigned long fillsize, pos;
	void *buf;
	int ret;

	buf = kmap(page);
	/* NOTE(review): kmap() of a valid page does not return NULL, so
	 * this check appears to be dead code; kept as-is. */
	if (!buf)
		return -ENOMEM;

	/* 32 bit warning -- but not for us :) */
	offset = page_offset(page);
	size = i_size_read(inode);
	fillsize = 0;
	ret = 0;
	if (offset < size) {
		size -= offset;
		fillsize = size > PAGE_SIZE ? PAGE_SIZE : size;

		pos = ROMFS_I(inode)->i_dataoffset + offset;

		ret = romfs_dev_read(inode->i_sb, pos, buf, fillsize);
		if (ret < 0) {
			SetPageError(page);
			fillsize = 0;
			ret = -EIO;
		}
	}

	/* zero-fill the tail beyond EOF (or the whole page on error) */
	if (fillsize < PAGE_SIZE)
		memset(buf + fillsize, 0, PAGE_SIZE - fillsize);
	if (ret == 0)
		SetPageUptodate(page);

	flush_dcache_page(page);
	kunmap(page);
	unlock_page(page);
	return ret;
}

static const struct address_space_operations romfs_aops = {
	.readpage	= romfs_readpage
};

/*
 * read the entries from a directory.  f_pos is the image offset of the
 * next entry to emit (0 means "start from the directory's first child").
 */
static int romfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *i = file_inode(filp);
	struct romfs_inode ri;
	unsigned long offset, maxoff;
	int j, ino, nextfh;
	int stored = 0;
	char fsname[ROMFS_MAXFN];	/* XXX dynamic? */
	int ret;

	maxoff = romfs_maxsize(i->i_sb);

	offset = filp->f_pos;
	if (!offset) {
		offset = i->i_ino & ROMFH_MASK;
		ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE);
		if (ret < 0)
			goto out;
		/* a directory's 'spec' word points at its first entry */
		offset = be32_to_cpu(ri.spec) & ROMFH_MASK;
	}

	/* Not really failsafe, but we are read-only... */
	for (;;) {
		if (!offset || offset >= maxoff) {
			offset = maxoff;
			filp->f_pos = offset;
			goto out;
		}
		filp->f_pos = offset;

		/* Fetch inode info */
		ret = romfs_dev_read(i->i_sb, offset, &ri, ROMFH_SIZE);
		if (ret < 0)
			goto out;

		j = romfs_dev_strnlen(i->i_sb, offset + ROMFH_SIZE,
				      sizeof(fsname) - 1);
		if (j < 0)
			goto out;

		ret = romfs_dev_read(i->i_sb, offset + ROMFH_SIZE, fsname, j);
		if (ret < 0)
			goto out;
		fsname[j] = '\0';

		ino = offset;
		nextfh = be32_to_cpu(ri.next);
		/* hard links report the target's inode number */
		if ((nextfh & ROMFH_TYPE) == ROMFH_HRD)
			ino = be32_to_cpu(ri.spec);
		if (filldir(dirent, fsname, j, offset, ino,
			    romfs_dtype_table[nextfh & ROMFH_TYPE]) < 0)
			goto out;

		stored++;
		offset = nextfh & ROMFH_MASK;
	}

out:
	return stored;
}

/*
 * look up an entry in a directory
 */
static struct dentry *romfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	unsigned long offset, maxoff;
	struct inode *inode;
	struct romfs_inode ri;
	const char *name;		/* got from dentry */
	int len, ret;

	offset = dir->i_ino & ROMFH_MASK;
	ret = romfs_dev_read(dir->i_sb, offset, &ri, ROMFH_SIZE);
	if (ret < 0)
		goto error;

	/* search all the file entries in the list starting from the one
	 * pointed to by the directory's special data */
	maxoff = romfs_maxsize(dir->i_sb);
	offset = be32_to_cpu(ri.spec) & ROMFH_MASK;

	name = dentry->d_name.name;
	len = dentry->d_name.len;

	for (;;) {
		if (!offset || offset >= maxoff)
			goto out0;

		ret = romfs_dev_read(dir->i_sb, offset, &ri, sizeof(ri));
		if (ret < 0)
			goto error;

		/* try to match the first 16 bytes of name */
		ret = romfs_dev_strcmp(dir->i_sb, offset + ROMFH_SIZE, name,
				       len);
		if (ret < 0)
			goto error;
		if (ret == 1)
			break;

		/* next entry */
		offset = be32_to_cpu(ri.next) & ROMFH_MASK;
	}

	/* Hard link handling */
	if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD)
		offset = be32_to_cpu(ri.spec) & ROMFH_MASK;

	inode = romfs_iget(dir->i_sb, offset);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto error;
	}
	goto outi;

	/*
	 * it's a bit funky, _lookup needs to return an error code
	 * (negative) or a NULL, both as a dentry.  ENOENT should not
	 * be returned, instead we need to create a negative dentry by
	 * d_add(dentry, NULL); and return 0 as no error.
	 * (Although as I see, it only matters on writable file
	 * systems).
	 */
out0:
	inode = NULL;
outi:
	d_add(dentry, inode);
	ret = 0;
error:
	/* ret == 0 on the success paths: ERR_PTR(0) is NULL */
	return ERR_PTR(ret);
}

static const struct file_operations romfs_dir_operations = {
	.read		= generic_read_dir,
	.readdir	= romfs_readdir,
	.llseek		= default_llseek,
};

static const struct inode_operations romfs_dir_inode_operations = {
	.lookup		= romfs_lookup,
};

/*
 * get a romfs inode based on its position in the image (which doubles as the
 * inode number)
 */
static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
{
	struct romfs_inode_info *inode;
	struct romfs_inode ri;
	struct inode *i;
	unsigned long nlen;
	unsigned nextfh;
	int ret;
	umode_t mode;

	/* we might have to traverse a chain of "hard link" file entries to get
	 * to the actual file */
	for (;;) {
		ret = romfs_dev_read(sb, pos, &ri, sizeof(ri));
		if (ret < 0)
			goto error;

		/* XXX: do romfs_checksum here too (with name) */

		nextfh = be32_to_cpu(ri.next);
		if ((nextfh & ROMFH_TYPE) != ROMFH_HRD)
			break;

		pos = be32_to_cpu(ri.spec) & ROMFH_MASK;
	}

	/* determine the length of the filename */
	nlen = romfs_dev_strnlen(sb, pos + ROMFH_SIZE, ROMFS_MAXFN);
	if (IS_ERR_VALUE(nlen))
		goto eio;

	/* get an inode for this image position */
	i = iget_locked(sb, pos);
	if (!i)
		return ERR_PTR(-ENOMEM);

	/* cached inode: nothing to (re)initialise */
	if (!(i->i_state & I_NEW))
		return i;

	/* precalculate the data offset */
	inode = ROMFS_I(i);
	inode->i_metasize = (ROMFH_SIZE + nlen + 1 + ROMFH_PAD) & ROMFH_MASK;
	inode->i_dataoffset = pos + inode->i_metasize;

	set_nlink(i, 1);		/* Hard to decide.. */
	i->i_size = be32_to_cpu(ri.size);
	i->i_mtime.tv_sec = i->i_atime.tv_sec = i->i_ctime.tv_sec = 0;
	i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0;

	/* set up mode and ops */
	mode = romfs_modemap[nextfh & ROMFH_TYPE];

	switch (nextfh & ROMFH_TYPE) {
	case ROMFH_DIR:
		i->i_size = ROMFS_I(i)->i_metasize;
		i->i_op = &romfs_dir_inode_operations;
		i->i_fop = &romfs_dir_operations;
		if (nextfh & ROMFH_EXEC)
			mode |= S_IXUGO;
		break;
	case ROMFH_REG:
		i->i_fop = &romfs_ro_fops;
		i->i_data.a_ops = &romfs_aops;
		if (i->i_sb->s_mtd)
			i->i_data.backing_dev_info =
				i->i_sb->s_mtd->backing_dev_info;
		if (nextfh & ROMFH_EXEC)
			mode |= S_IXUGO;
		break;
	case ROMFH_SYM:
		i->i_op = &page_symlink_inode_operations;
		i->i_data.a_ops = &romfs_aops;
		mode |= S_IRWXUGO;
		break;
	default:
		/* depending on MBZ for sock/fifos */
		nextfh = be32_to_cpu(ri.spec);
		init_special_inode(i, mode, MKDEV(nextfh >> 16,
						  nextfh & 0xffff));
		break;
	}

	i->i_mode = mode;

	unlock_new_inode(i);
	return i;

eio:
	ret = -EIO;
error:
	printk(KERN_ERR "ROMFS: read error for inode 0x%lx\n", pos);
	return ERR_PTR(ret);
}

/*
 * allocate a new inode
 */
static struct inode *romfs_alloc_inode(struct super_block *sb)
{
	struct romfs_inode_info *inode;

	inode = kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL);
	return inode ? &inode->vfs_inode : NULL;
}

/*
 * return a spent inode to the slab cache
 */
static void romfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
}

static void romfs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, romfs_i_callback);
}

/*
 * get filesystem statistics
 */
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = ROMFS_MAGIC;
	buf->f_namelen = ROMFS_MAXFN;
	buf->f_bsize = ROMBSIZE;
	/* NOTE(review): f_ffree is read here before this function sets
	 * it -- this relies on the caller having zeroed *buf; confirm
	 * against the VFS statfs path (later mainline assigns 0
	 * explicitly). */
	buf->f_bfree = buf->f_bavail = buf->f_ffree;
	buf->f_blocks = (romfs_maxsize(dentry->d_sb) + ROMBSIZE - 1) >> ROMBSBITS;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	return 0;
}

/*
 * remounting must involve read-only
 */
static int romfs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}

static const struct super_operations romfs_super_ops = {
	.alloc_inode	= romfs_alloc_inode,
	.destroy_inode	= romfs_destroy_inode,
	.statfs		= romfs_statfs,
	.remount_fs	= romfs_remount,
};

/*
 * checksum check on part of a romfs filesystem.  Sums big-endian 32-bit
 * words; a valid image region sums to 0.
 */
static __u32 romfs_checksum(const void *data, int size)
{
	const __be32 *ptr = data;
	__u32 sum;

	sum = 0;
	size >>= 2;
	while (size > 0) {
		sum += be32_to_cpu(*ptr++);
		size--;
	}
	return sum;
}

/*
 * fill in the superblock
 */
static int romfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct romfs_super_block *rsb;
	struct inode *root;
	unsigned long pos, img_size;
	const char *storage;
	size_t len;
	int ret;

#ifdef CONFIG_BLOCK
	if (!sb->s_mtd) {
		sb_set_blocksize(sb, ROMBSIZE);
	} else {
		sb->s_blocksize = ROMBSIZE;
		sb->s_blocksize_bits = blksize_bits(ROMBSIZE);
	}
#endif

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_magic = ROMFS_MAGIC;
	sb->s_flags |= MS_RDONLY | MS_NOATIME;
	sb->s_op = &romfs_super_ops;

	/* read the image superblock and check it */
	rsb = kmalloc(512, GFP_KERNEL);
	if (!rsb)
		return -ENOMEM;

	/* s_fs_info caches the usable image size for romfs_maxsize();
	 * provisionally 512 so the first read below is within bounds */
	sb->s_fs_info = (void *) 512;
	ret = romfs_dev_read(sb, 0, rsb, 512);
	if (ret < 0)
		goto error_rsb;

	img_size = be32_to_cpu(rsb->size);

	if (sb->s_mtd && img_size > sb->s_mtd->size)
		goto error_rsb_inval;

	sb->s_fs_info = (void *) img_size;

	if (rsb->word0 != ROMSB_WORD0 || rsb->word1 != ROMSB_WORD1 ||
	    img_size < ROMFH_SIZE) {
		if (!silent)
			printk(KERN_WARNING "VFS:"
			       " Can't find a romfs filesystem on dev %s.\n",
			       sb->s_id);
		goto error_rsb_inval;
	}

	if (romfs_checksum(rsb, min_t(size_t, img_size, 512))) {
		printk(KERN_ERR "ROMFS: bad initial checksum on dev %s.\n",
		       sb->s_id);
		goto error_rsb_inval;
	}

	storage = sb->s_mtd ? "MTD" : "the block layer";

	len = strnlen(rsb->name, ROMFS_MAXFN);
	if (!silent)
		printk(KERN_NOTICE "ROMFS: Mounting image '%*.*s' through %s\n",
		       (unsigned) len, (unsigned) len, rsb->name, storage);

	kfree(rsb);
	rsb = NULL;

	/* find the root directory (immediately after the padded volume name) */
	pos = (ROMFH_SIZE + len + 1 + ROMFH_PAD) & ROMFH_MASK;

	root = romfs_iget(sb, pos);
	if (IS_ERR(root))
		goto error;

	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		goto error;

	return 0;

error:
	return -EINVAL;
error_rsb_inval:
	ret = -EINVAL;
error_rsb:
	kfree(rsb);
	return ret;
}

/*
 * get a superblock for mounting: try MTD first, then fall back to a
 * block device, depending on what is configured in.
 */
static struct dentry *romfs_mount(struct file_system_type *fs_type,
			int flags, const char *dev_name,
			void *data)
{
	struct dentry *ret = ERR_PTR(-EINVAL);

#ifdef CONFIG_ROMFS_ON_MTD
	ret = mount_mtd(fs_type, flags, dev_name, data, romfs_fill_super);
#endif
#ifdef CONFIG_ROMFS_ON_BLOCK
	if (ret == ERR_PTR(-EINVAL))
		ret = mount_bdev(fs_type, flags, dev_name, data,
				  romfs_fill_super);
#endif
	return ret;
}

/*
 * destroy a romfs superblock in the appropriate manner
 */
static void romfs_kill_sb(struct super_block *sb)
{
#ifdef CONFIG_ROMFS_ON_MTD
	if (sb->s_mtd) {
		kill_mtd_super(sb);
		return;
	}
#endif
#ifdef CONFIG_ROMFS_ON_BLOCK
	if (sb->s_bdev) {
		kill_block_super(sb);
		return;
	}
#endif
}

static struct file_system_type romfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "romfs",
	.mount		= romfs_mount,
	.kill_sb	= romfs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("romfs");

/*
 * inode storage initialiser
 */
static void romfs_i_init_once(void *_inode)
{
	struct romfs_inode_info *inode = _inode;

	inode_init_once(&inode->vfs_inode);
}

/*
 * romfs module initialisation
 */
static int __init init_romfs_fs(void)
{
	int ret;

	printk(KERN_INFO "ROMFS MTD (C) 2007 Red Hat, Inc.\n");

	romfs_inode_cachep =
		kmem_cache_create("romfs_i",
				  sizeof(struct romfs_inode_info), 0,
				  SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
				  romfs_i_init_once);

	if (!romfs_inode_cachep) {
		printk(KERN_ERR
		       "ROMFS error: Failed to initialise inode cache\n");
		return -ENOMEM;
	}
	ret = register_filesystem(&romfs_fs_type);
	if (ret) {
		printk(KERN_ERR "ROMFS error: Failed to register filesystem\n");
		goto error_register;
	}
	return 0;

error_register:
	kmem_cache_destroy(romfs_inode_cachep);
	return ret;
}

/*
 * romfs module removal
 */
static void __exit exit_romfs_fs(void)
{
	unregister_filesystem(&romfs_fs_type);
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(romfs_inode_cachep);
}

module_init(init_romfs_fs);
module_exit(exit_romfs_fs);

MODULE_DESCRIPTION("Direct-MTD Capable RomFS");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");		/* Actually dual-licensed, but it doesn't matter for */
gpl-2.0
klquicksall/Galaxy-Nexus-4.2
arch/arm/mach-s5pc100/clock.c
2013
31325
/* linux/arch/arm/mach-s5pc100/clock.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S5PC100 - Clock support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <mach/map.h> #include <plat/cpu-freq.h> #include <mach/regs-clock.h> #include <plat/clock.h> #include <plat/cpu.h> #include <plat/pll.h> #include <plat/s5p-clock.h> #include <plat/clock-clksrc.h> #include <plat/s5pc100.h> static struct clk s5p_clk_otgphy = { .name = "otg_phy", .id = -1, }; static struct clk *clk_src_mout_href_list[] = { [0] = &s5p_clk_27m, [1] = &clk_fin_hpll, }; static struct clksrc_sources clk_src_mout_href = { .sources = clk_src_mout_href_list, .nr_sources = ARRAY_SIZE(clk_src_mout_href_list), }; static struct clksrc_clk clk_mout_href = { .clk = { .name = "mout_href", .id = -1, }, .sources = &clk_src_mout_href, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 20, .size = 1 }, }; static struct clk *clk_src_mout_48m_list[] = { [0] = &clk_xusbxti, [1] = &s5p_clk_otgphy, }; static struct clksrc_sources clk_src_mout_48m = { .sources = clk_src_mout_48m_list, .nr_sources = ARRAY_SIZE(clk_src_mout_48m_list), }; static struct clksrc_clk clk_mout_48m = { .clk = { .name = "mout_48m", .id = -1, }, .sources = &clk_src_mout_48m, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 24, .size = 1 }, }; static struct clksrc_clk clk_mout_mpll = { .clk = { .name = "mout_mpll", .id = -1, }, .sources = &clk_src_mpll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 4, .size = 1 }, }; static struct clksrc_clk clk_mout_apll = { .clk = { .name = "mout_apll", .id = -1, }, .sources = &clk_src_apll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 0, .size = 1 }, }; static struct clksrc_clk 
clk_mout_epll = { .clk = { .name = "mout_epll", .id = -1, }, .sources = &clk_src_epll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 8, .size = 1 }, }; static struct clk *clk_src_mout_hpll_list[] = { [0] = &s5p_clk_27m, }; static struct clksrc_sources clk_src_mout_hpll = { .sources = clk_src_mout_hpll_list, .nr_sources = ARRAY_SIZE(clk_src_mout_hpll_list), }; static struct clksrc_clk clk_mout_hpll = { .clk = { .name = "mout_hpll", .id = -1, }, .sources = &clk_src_mout_hpll, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 12, .size = 1 }, }; static struct clksrc_clk clk_div_apll = { .clk = { .name = "div_apll", .id = -1, .parent = &clk_mout_apll.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 0, .size = 1 }, }; static struct clksrc_clk clk_div_arm = { .clk = { .name = "div_arm", .id = -1, .parent = &clk_div_apll.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 4, .size = 3 }, }; static struct clksrc_clk clk_div_d0_bus = { .clk = { .name = "div_d0_bus", .id = -1, .parent = &clk_div_arm.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 8, .size = 3 }, }; static struct clksrc_clk clk_div_pclkd0 = { .clk = { .name = "div_pclkd0", .id = -1, .parent = &clk_div_d0_bus.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 12, .size = 3 }, }; static struct clksrc_clk clk_div_secss = { .clk = { .name = "div_secss", .id = -1, .parent = &clk_div_d0_bus.clk, }, .reg_div = { .reg = S5P_CLK_DIV0, .shift = 16, .size = 3 }, }; static struct clksrc_clk clk_div_apll2 = { .clk = { .name = "div_apll2", .id = -1, .parent = &clk_mout_apll.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 0, .size = 3 }, }; static struct clk *clk_src_mout_am_list[] = { [0] = &clk_mout_mpll.clk, [1] = &clk_div_apll2.clk, }; struct clksrc_sources clk_src_mout_am = { .sources = clk_src_mout_am_list, .nr_sources = ARRAY_SIZE(clk_src_mout_am_list), }; static struct clksrc_clk clk_mout_am = { .clk = { .name = "mout_am", .id = -1, }, .sources = &clk_src_mout_am, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 16, .size = 
1 }, }; static struct clksrc_clk clk_div_d1_bus = { .clk = { .name = "div_d1_bus", .id = -1, .parent = &clk_mout_am.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 3 }, }; static struct clksrc_clk clk_div_mpll2 = { .clk = { .name = "div_mpll2", .id = -1, .parent = &clk_mout_am.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 8, .size = 1 }, }; static struct clksrc_clk clk_div_mpll = { .clk = { .name = "div_mpll", .id = -1, .parent = &clk_mout_am.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 4, .size = 2 }, }; static struct clk *clk_src_mout_onenand_list[] = { [0] = &clk_div_d0_bus.clk, [1] = &clk_div_d1_bus.clk, }; struct clksrc_sources clk_src_mout_onenand = { .sources = clk_src_mout_onenand_list, .nr_sources = ARRAY_SIZE(clk_src_mout_onenand_list), }; static struct clksrc_clk clk_mout_onenand = { .clk = { .name = "mout_onenand", .id = -1, }, .sources = &clk_src_mout_onenand, .reg_src = { .reg = S5P_CLK_SRC0, .shift = 24, .size = 1 }, }; static struct clksrc_clk clk_div_onenand = { .clk = { .name = "div_onenand", .id = -1, .parent = &clk_mout_onenand.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 20, .size = 2 }, }; static struct clksrc_clk clk_div_pclkd1 = { .clk = { .name = "div_pclkd1", .id = -1, .parent = &clk_div_d1_bus.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 16, .size = 3 }, }; static struct clksrc_clk clk_div_cam = { .clk = { .name = "div_cam", .id = -1, .parent = &clk_div_mpll2.clk, }, .reg_div = { .reg = S5P_CLK_DIV1, .shift = 24, .size = 5 }, }; static struct clksrc_clk clk_div_hdmi = { .clk = { .name = "div_hdmi", .id = -1, .parent = &clk_mout_hpll.clk, }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 28, .size = 4 }, }; static u32 epll_div[][4] = { { 32750000, 131, 3, 4 }, { 32768000, 131, 3, 4 }, { 36000000, 72, 3, 3 }, { 45000000, 90, 3, 3 }, { 45158000, 90, 3, 3 }, { 45158400, 90, 3, 3 }, { 48000000, 96, 3, 3 }, { 49125000, 131, 4, 3 }, { 49152000, 131, 4, 3 }, { 60000000, 120, 3, 3 }, { 67737600, 226, 5, 3 }, { 
67738000, 226, 5, 3 }, { 73800000, 246, 5, 3 }, { 73728000, 246, 5, 3 }, { 72000000, 144, 3, 3 }, { 84000000, 168, 3, 3 }, { 96000000, 96, 3, 2 }, { 144000000, 144, 3, 2 }, { 192000000, 96, 3, 1 } }; static int s5pc100_epll_set_rate(struct clk *clk, unsigned long rate) { unsigned int epll_con; unsigned int i; if (clk->rate == rate) /* Return if nothing changed */ return 0; epll_con = __raw_readl(S5P_EPLL_CON); epll_con &= ~(PLL65XX_MDIV_MASK | PLL65XX_PDIV_MASK | PLL65XX_SDIV_MASK); for (i = 0; i < ARRAY_SIZE(epll_div); i++) { if (epll_div[i][0] == rate) { epll_con |= (epll_div[i][1] << PLL65XX_MDIV_SHIFT) | (epll_div[i][2] << PLL65XX_PDIV_SHIFT) | (epll_div[i][3] << PLL65XX_SDIV_SHIFT); break; } } if (i == ARRAY_SIZE(epll_div)) { printk(KERN_ERR "%s: Invalid Clock EPLL Frequency\n", __func__); return -EINVAL; } __raw_writel(epll_con, S5P_EPLL_CON); printk(KERN_WARNING "EPLL Rate changes from %lu to %lu\n", clk->rate, rate); clk->rate = rate; return 0; } static struct clk_ops s5pc100_epll_ops = { .get_rate = s5p_epll_get_rate, .set_rate = s5pc100_epll_set_rate, }; static int s5pc100_d0_0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D00, clk, enable); } static int s5pc100_d0_1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D01, clk, enable); } static int s5pc100_d0_2_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D02, clk, enable); } static int s5pc100_d1_0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D10, clk, enable); } static int s5pc100_d1_1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D11, clk, enable); } static int s5pc100_d1_2_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D12, clk, enable); } static int s5pc100_d1_3_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D13, clk, enable); } static int s5pc100_d1_4_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D14, clk, enable); } static int 
s5pc100_d1_5_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_D15, clk, enable); } static int s5pc100_sclk0_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_SCLK0, clk, enable); } static int s5pc100_sclk1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(S5P_CLKGATE_SCLK1, clk, enable); } /* * The following clocks will be disabled during clock initialization. It is * recommended to keep the following clocks disabled until the driver requests * for enabling the clock. */ static struct clk init_clocks_off[] = { { .name = "cssys", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_0_ctrl, .ctrlbit = (1 << 6), }, { .name = "secss", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_0_ctrl, .ctrlbit = (1 << 5), }, { .name = "g2d", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_0_ctrl, .ctrlbit = (1 << 4), }, { .name = "mdma", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_0_ctrl, .ctrlbit = (1 << 3), }, { .name = "cfcon", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_0_ctrl, .ctrlbit = (1 << 2), }, { .name = "nfcon", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_1_ctrl, .ctrlbit = (1 << 3), }, { .name = "onenandc", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_1_ctrl, .ctrlbit = (1 << 2), }, { .name = "sdm", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_2_ctrl, .ctrlbit = (1 << 2), }, { .name = "seckey", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_2_ctrl, .ctrlbit = (1 << 1), }, { .name = "hsmmc", .id = 2, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 7), }, { .name = "hsmmc", .id = 1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 6), }, { .name = "hsmmc", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 5), }, { .name = "modemif", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = 
s5pc100_d1_0_ctrl, .ctrlbit = (1 << 4), }, { .name = "otg", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 3), }, { .name = "usbhost", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 2), }, { .name = "pdma", .id = 1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 1), }, { .name = "pdma", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 0), }, { .name = "lcd", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 0), }, { .name = "rotator", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 1), }, { .name = "fimc", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 2), }, { .name = "fimc", .id = 1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 3), }, { .name = "fimc", .id = 2, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 4), }, { .name = "jpeg", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 5), }, { .name = "mipi-dsim", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 6), }, { .name = "mipi-csis", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_1_ctrl, .ctrlbit = (1 << 7), }, { .name = "g3d", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_0_ctrl, .ctrlbit = (1 << 8), }, { .name = "tv", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_2_ctrl, .ctrlbit = (1 << 0), }, { .name = "vp", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_2_ctrl, .ctrlbit = (1 << 1), }, { .name = "mixer", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_2_ctrl, .ctrlbit = (1 << 2), }, { .name = "hdmi", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_2_ctrl, .ctrlbit = (1 << 3), }, { .name = "mfc", .id = -1, .parent = 
&clk_div_d1_bus.clk, .enable = s5pc100_d1_2_ctrl, .ctrlbit = (1 << 4), }, { .name = "apc", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_3_ctrl, .ctrlbit = (1 << 2), }, { .name = "iec", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_3_ctrl, .ctrlbit = (1 << 3), }, { .name = "systimer", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_3_ctrl, .ctrlbit = (1 << 7), }, { .name = "watchdog", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_3_ctrl, .ctrlbit = (1 << 8), }, { .name = "rtc", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_3_ctrl, .ctrlbit = (1 << 9), }, { .name = "i2c", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 4), }, { .name = "i2c", .id = 1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 5), }, { .name = "spi", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 6), }, { .name = "spi", .id = 1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 7), }, { .name = "spi", .id = 2, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 8), }, { .name = "irda", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 9), }, { .name = "ccan", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 10), }, { .name = "ccan", .id = 1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 11), }, { .name = "hsitx", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 12), }, { .name = "hsirx", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 13), }, { .name = "iis", .id = 0, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 0), }, { .name = "iis", .id = 1, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 1), }, { .name = "iis", .id = 2, 
.parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 2), }, { .name = "ac97", .id = -1, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 3), }, { .name = "pcm", .id = 0, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 4), }, { .name = "pcm", .id = 1, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 5), }, { .name = "spdif", .id = -1, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 6), }, { .name = "adc", .id = -1, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 7), }, { .name = "keypad", .id = -1, .parent = &clk_div_pclkd1.clk, .enable = s5pc100_d1_5_ctrl, .ctrlbit = (1 << 8), }, { .name = "spi_48m", .id = 0, .parent = &clk_mout_48m.clk, .enable = s5pc100_sclk0_ctrl, .ctrlbit = (1 << 7), }, { .name = "spi_48m", .id = 1, .parent = &clk_mout_48m.clk, .enable = s5pc100_sclk0_ctrl, .ctrlbit = (1 << 8), }, { .name = "spi_48m", .id = 2, .parent = &clk_mout_48m.clk, .enable = s5pc100_sclk0_ctrl, .ctrlbit = (1 << 9), }, { .name = "mmc_48m", .id = 0, .parent = &clk_mout_48m.clk, .enable = s5pc100_sclk0_ctrl, .ctrlbit = (1 << 15), }, { .name = "mmc_48m", .id = 1, .parent = &clk_mout_48m.clk, .enable = s5pc100_sclk0_ctrl, .ctrlbit = (1 << 16), }, { .name = "mmc_48m", .id = 2, .parent = &clk_mout_48m.clk, .enable = s5pc100_sclk0_ctrl, .ctrlbit = (1 << 17), }, }; static struct clk clk_vclk54m = { .name = "vclk_54m", .id = -1, .rate = 54000000, }; static struct clk clk_i2scdclk0 = { .name = "i2s_cdclk0", .id = -1, }; static struct clk clk_i2scdclk1 = { .name = "i2s_cdclk1", .id = -1, }; static struct clk clk_i2scdclk2 = { .name = "i2s_cdclk2", .id = -1, }; static struct clk clk_pcmcdclk0 = { .name = "pcm_cdclk0", .id = -1, }; static struct clk clk_pcmcdclk1 = { .name = "pcm_cdclk1", .id = -1, }; static struct clk *clk_src_group1_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll2.clk, [2] = &clk_fin_epll, [3] 
= &clk_mout_hpll.clk, }; struct clksrc_sources clk_src_group1 = { .sources = clk_src_group1_list, .nr_sources = ARRAY_SIZE(clk_src_group1_list), }; static struct clk *clk_src_group2_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, }; struct clksrc_sources clk_src_group2 = { .sources = clk_src_group2_list, .nr_sources = ARRAY_SIZE(clk_src_group2_list), }; static struct clk *clk_src_group3_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, [2] = &clk_fin_epll, [3] = &clk_i2scdclk0, [4] = &clk_pcmcdclk0, [5] = &clk_mout_hpll.clk, }; struct clksrc_sources clk_src_group3 = { .sources = clk_src_group3_list, .nr_sources = ARRAY_SIZE(clk_src_group3_list), }; static struct clksrc_clk clk_sclk_audio0 = { .clk = { .name = "sclk_audio", .id = 0, .ctrlbit = (1 << 8), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_group3, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 12, .size = 3 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 12, .size = 4 }, }; static struct clk *clk_src_group4_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, [2] = &clk_fin_epll, [3] = &clk_i2scdclk1, [4] = &clk_pcmcdclk1, [5] = &clk_mout_hpll.clk, }; struct clksrc_sources clk_src_group4 = { .sources = clk_src_group4_list, .nr_sources = ARRAY_SIZE(clk_src_group4_list), }; static struct clksrc_clk clk_sclk_audio1 = { .clk = { .name = "sclk_audio", .id = 1, .ctrlbit = (1 << 9), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_group4, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 16, .size = 3 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 16, .size = 4 }, }; static struct clk *clk_src_group5_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, [2] = &clk_fin_epll, [3] = &clk_i2scdclk2, [4] = &clk_mout_hpll.clk, }; struct clksrc_sources clk_src_group5 = { .sources = clk_src_group5_list, .nr_sources = ARRAY_SIZE(clk_src_group5_list), }; static struct clksrc_clk clk_sclk_audio2 = { .clk = { .name = "sclk_audio", .id = 2, .ctrlbit = (1 << 10), .enable = 
s5pc100_sclk1_ctrl, }, .sources = &clk_src_group5, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 20, .size = 3 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 20, .size = 4 }, }; static struct clk *clk_src_group6_list[] = { [0] = &s5p_clk_27m, [1] = &clk_vclk54m, [2] = &clk_div_hdmi.clk, }; struct clksrc_sources clk_src_group6 = { .sources = clk_src_group6_list, .nr_sources = ARRAY_SIZE(clk_src_group6_list), }; static struct clk *clk_src_group7_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, [2] = &clk_mout_hpll.clk, [3] = &clk_vclk54m, }; struct clksrc_sources clk_src_group7 = { .sources = clk_src_group7_list, .nr_sources = ARRAY_SIZE(clk_src_group7_list), }; static struct clk *clk_src_mmc0_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, [2] = &clk_fin_epll, }; struct clksrc_sources clk_src_mmc0 = { .sources = clk_src_mmc0_list, .nr_sources = ARRAY_SIZE(clk_src_mmc0_list), }; static struct clk *clk_src_mmc12_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, [2] = &clk_fin_epll, [3] = &clk_mout_hpll.clk, }; struct clksrc_sources clk_src_mmc12 = { .sources = clk_src_mmc12_list, .nr_sources = ARRAY_SIZE(clk_src_mmc12_list), }; static struct clk *clk_src_irda_usb_list[] = { [0] = &clk_mout_epll.clk, [1] = &clk_div_mpll.clk, [2] = &clk_fin_epll, [3] = &clk_mout_hpll.clk, }; struct clksrc_sources clk_src_irda_usb = { .sources = clk_src_irda_usb_list, .nr_sources = ARRAY_SIZE(clk_src_irda_usb_list), }; static struct clk *clk_src_pwi_list[] = { [0] = &clk_fin_epll, [1] = &clk_mout_epll.clk, [2] = &clk_div_mpll.clk, }; struct clksrc_sources clk_src_pwi = { .sources = clk_src_pwi_list, .nr_sources = ARRAY_SIZE(clk_src_pwi_list), }; static struct clk *clk_sclk_spdif_list[] = { [0] = &clk_sclk_audio0.clk, [1] = &clk_sclk_audio1.clk, [2] = &clk_sclk_audio2.clk, }; struct clksrc_sources clk_src_sclk_spdif = { .sources = clk_sclk_spdif_list, .nr_sources = ARRAY_SIZE(clk_sclk_spdif_list), }; static int s5pc100_spdif_set_rate(struct clk *clk, 
unsigned long rate) { struct clk *pclk; int ret; pclk = clk_get_parent(clk); if (IS_ERR(pclk)) return -EINVAL; ret = pclk->ops->set_rate(pclk, rate); clk_put(pclk); return ret; } static unsigned long s5pc100_spdif_get_rate(struct clk *clk) { struct clk *pclk; int rate; pclk = clk_get_parent(clk); if (IS_ERR(pclk)) return -EINVAL; rate = pclk->ops->get_rate(clk); clk_put(pclk); return rate; } static struct clk_ops s5pc100_sclk_spdif_ops = { .set_rate = s5pc100_spdif_set_rate, .get_rate = s5pc100_spdif_get_rate, }; static struct clksrc_clk clk_sclk_spdif = { .clk = { .name = "sclk_spdif", .id = -1, .ctrlbit = (1 << 11), .enable = s5pc100_sclk1_ctrl, .ops = &s5pc100_sclk_spdif_ops, }, .sources = &clk_src_sclk_spdif, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 24, .size = 2 }, }; static struct clksrc_clk clksrcs[] = { { .clk = { .name = "sclk_spi", .id = 0, .ctrlbit = (1 << 4), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_group1, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 4, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 4, .size = 4 }, }, { .clk = { .name = "sclk_spi", .id = 1, .ctrlbit = (1 << 5), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_group1, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 8, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 8, .size = 4 }, }, { .clk = { .name = "sclk_spi", .id = 2, .ctrlbit = (1 << 6), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_group1, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 12, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 12, .size = 4 }, }, { .clk = { .name = "uclk1", .id = -1, .ctrlbit = (1 << 3), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_group2, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 0, .size = 1 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_mixer", .id = -1, .ctrlbit = (1 << 6), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_group6, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 28, .size = 2 }, }, { .clk = { 
.name = "sclk_lcd", .id = -1, .ctrlbit = (1 << 0), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_group7, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 12, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 12, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .id = 0, .ctrlbit = (1 << 1), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_group7, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 16, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .id = 1, .ctrlbit = (1 << 2), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_group7, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 20, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 }, }, { .clk = { .name = "sclk_fimc", .id = 2, .ctrlbit = (1 << 3), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_group7, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 24, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 24, .size = 4 }, }, { .clk = { .name = "sclk_mmc", .id = 0, .ctrlbit = (1 << 12), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_mmc0, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 0, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 0, .size = 4 }, }, { .clk = { .name = "sclk_mmc", .id = 1, .ctrlbit = (1 << 13), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_mmc12, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 4, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 4, .size = 4 }, }, { .clk = { .name = "sclk_mmc", .id = 2, .ctrlbit = (1 << 14), .enable = s5pc100_sclk1_ctrl, }, .sources = &clk_src_mmc12, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 8, .size = 4 }, }, { .clk = { .name = "sclk_irda", .id = 2, .ctrlbit = (1 << 10), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_irda_usb, .reg_src = { .reg = S5P_CLK_SRC2, .shift = 8, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV3, .shift = 8, .size = 4 }, }, { .clk = { .name = "sclk_irda", .id = -1, .ctrlbit 
= (1 << 10), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_mmc12, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 16, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 16, .size = 4 }, }, { .clk = { .name = "sclk_pwi", .id = -1, .ctrlbit = (1 << 1), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_pwi, .reg_src = { .reg = S5P_CLK_SRC3, .shift = 0, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV4, .shift = 0, .size = 3 }, }, { .clk = { .name = "sclk_uhost", .id = -1, .ctrlbit = (1 << 11), .enable = s5pc100_sclk0_ctrl, }, .sources = &clk_src_irda_usb, .reg_src = { .reg = S5P_CLK_SRC1, .shift = 20, .size = 2 }, .reg_div = { .reg = S5P_CLK_DIV2, .shift = 20, .size = 4 }, }, }; /* Clock initialisation code */ static struct clksrc_clk *sysclks[] = { &clk_mout_apll, &clk_mout_epll, &clk_mout_mpll, &clk_mout_hpll, &clk_mout_href, &clk_mout_48m, &clk_div_apll, &clk_div_arm, &clk_div_d0_bus, &clk_div_pclkd0, &clk_div_secss, &clk_div_apll2, &clk_mout_am, &clk_div_d1_bus, &clk_div_mpll2, &clk_div_mpll, &clk_mout_onenand, &clk_div_onenand, &clk_div_pclkd1, &clk_div_cam, &clk_div_hdmi, &clk_sclk_audio0, &clk_sclk_audio1, &clk_sclk_audio2, &clk_sclk_spdif, }; void __init_or_cpufreq s5pc100_setup_clocks(void) { unsigned long xtal; unsigned long arm; unsigned long hclkd0; unsigned long hclkd1; unsigned long pclkd0; unsigned long pclkd1; unsigned long apll; unsigned long mpll; unsigned long epll; unsigned long hpll; unsigned int ptr; /* Set S5PC100 functions for clk_fout_epll */ clk_fout_epll.enable = s5p_epll_enable; clk_fout_epll.ops = &s5pc100_epll_ops; printk(KERN_DEBUG "%s: registering clocks\n", __func__); xtal = clk_get_rate(&clk_xtal); printk(KERN_DEBUG "%s: xtal is %ld\n", __func__, xtal); apll = s5p_get_pll65xx(xtal, __raw_readl(S5P_APLL_CON)); mpll = s5p_get_pll65xx(xtal, __raw_readl(S5P_MPLL_CON)); epll = s5p_get_pll65xx(xtal, __raw_readl(S5P_EPLL_CON)); hpll = s5p_get_pll65xx(xtal, __raw_readl(S5P_HPLL_CON)); printk(KERN_INFO "S5PC100: PLL settings, 
A=%ld.%ldMHz, M=%ld.%ldMHz, E=%ld.%ldMHz, H=%ld.%ldMHz\n", print_mhz(apll), print_mhz(mpll), print_mhz(epll), print_mhz(hpll)); clk_fout_apll.rate = apll; clk_fout_mpll.rate = mpll; clk_fout_epll.rate = epll; clk_mout_hpll.clk.rate = hpll; for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++) s3c_set_clksrc(&clksrcs[ptr], true); arm = clk_get_rate(&clk_div_arm.clk); hclkd0 = clk_get_rate(&clk_div_d0_bus.clk); pclkd0 = clk_get_rate(&clk_div_pclkd0.clk); hclkd1 = clk_get_rate(&clk_div_d1_bus.clk); pclkd1 = clk_get_rate(&clk_div_pclkd1.clk); printk(KERN_INFO "S5PC100: HCLKD0=%ld.%ldMHz, HCLKD1=%ld.%ldMHz, PCLKD0=%ld.%ldMHz, PCLKD1=%ld.%ldMHz\n", print_mhz(hclkd0), print_mhz(hclkd1), print_mhz(pclkd0), print_mhz(pclkd1)); clk_f.rate = arm; clk_h.rate = hclkd1; clk_p.rate = pclkd1; } /* * The following clocks will be enabled during clock initialization. */ static struct clk init_clocks[] = { { .name = "tzic", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_0_ctrl, .ctrlbit = (1 << 1), }, { .name = "intc", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_0_ctrl, .ctrlbit = (1 << 0), }, { .name = "ebi", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_1_ctrl, .ctrlbit = (1 << 5), }, { .name = "intmem", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_1_ctrl, .ctrlbit = (1 << 4), }, { .name = "sromc", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_1_ctrl, .ctrlbit = (1 << 1), }, { .name = "dmc", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_1_ctrl, .ctrlbit = (1 << 0), }, { .name = "chipid", .id = -1, .parent = &clk_div_d0_bus.clk, .enable = s5pc100_d0_1_ctrl, .ctrlbit = (1 << 0), }, { .name = "gpio", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_3_ctrl, .ctrlbit = (1 << 1), }, { .name = "uart", .id = 0, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 0), }, { .name = "uart", .id = 1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit 
= (1 << 1), }, { .name = "uart", .id = 2, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 2), }, { .name = "uart", .id = 3, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_4_ctrl, .ctrlbit = (1 << 3), }, { .name = "timers", .id = -1, .parent = &clk_div_d1_bus.clk, .enable = s5pc100_d1_3_ctrl, .ctrlbit = (1 << 6), }, }; static struct clk *clks[] __initdata = { &clk_ext, &clk_i2scdclk0, &clk_i2scdclk1, &clk_i2scdclk2, &clk_pcmcdclk0, &clk_pcmcdclk1, }; void __init s5pc100_register_clocks(void) { int ptr; s3c24xx_register_clocks(clks, ARRAY_SIZE(clks)); for (ptr = 0; ptr < ARRAY_SIZE(sysclks); ptr++) s3c_register_clksrc(sysclks[ptr], 1); s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs)); s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks)); s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); s3c_pwmclk_init(); }
gpl-2.0
TeamOrion-Devices/Orion_kernel_motorola_msm8226
arch/x86/kernel/cpu/mcheck/mce-severity.c
2013
7272
/* * MCE grading rules. * Copyright 2008, 2009 Intel Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * Author: Andi Kleen */ #include <linux/kernel.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/debugfs.h> #include <asm/mce.h> #include "mce-internal.h" /* * Grade an mce by severity. In general the most severe ones are processed * first. Since there are quite a lot of combinations test the bits in a * table-driven way. The rules are simply processed in order, first * match wins. * * Note this is only used for machine check exceptions, the corrected * errors use much simpler rules. The exceptions still check for the corrected * errors, but only to leave them alone for the CMCI handler (except for * panic situations) */ enum context { IN_KERNEL = 1, IN_USER = 2 }; enum ser { SER_REQUIRED = 1, NO_SER = 2 }; static struct severity { u64 mask; u64 result; unsigned char sev; unsigned char mcgmask; unsigned char mcgres; unsigned char ser; unsigned char context; unsigned char covered; char *msg; } severities[] = { #define MCESEV(s, m, c...) { .sev = MCE_ ## s ## _SEVERITY, .msg = m, ## c } #define KERNEL .context = IN_KERNEL #define USER .context = IN_USER #define SER .ser = SER_REQUIRED #define NOSER .ser = NO_SER #define BITCLR(x) .mask = x, .result = 0 #define BITSET(x) .mask = x, .result = x #define MCGMASK(x, y) .mcgmask = x, .mcgres = y #define MASK(x, y) .mask = x, .result = y #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) #define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV) #define MCACOD 0xffff /* Architecturally defined codes from SDM Vol. 
3B Chapter 15 */ #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ #define MCACOD_SCRUBMSK 0xfff0 #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ #define MCACOD_DATA 0x0134 /* Data Load */ #define MCACOD_INSTR 0x0150 /* Instruction Fetch */ MCESEV( NO, "Invalid", BITCLR(MCI_STATUS_VAL) ), MCESEV( NO, "Not enabled", BITCLR(MCI_STATUS_EN) ), MCESEV( PANIC, "Processor context corrupt", BITSET(MCI_STATUS_PCC) ), /* When MCIP is not set something is very confused */ MCESEV( PANIC, "MCIP not set in MCA handler", MCGMASK(MCG_STATUS_MCIP, 0) ), /* Neither return not error IP -- no chance to recover -> PANIC */ MCESEV( PANIC, "Neither restart nor error IP", MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, 0) ), MCESEV( PANIC, "In kernel and no restart IP", KERNEL, MCGMASK(MCG_STATUS_RIPV, 0) ), MCESEV( KEEP, "Corrected error", NOSER, BITCLR(MCI_STATUS_UC) ), /* ignore OVER for UCNA */ MCESEV( KEEP, "Uncorrected no action required", SER, MASK(MCI_UC_SAR, MCI_STATUS_UC) ), MCESEV( PANIC, "Illegal combination (UCNA with AR=1)", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR) ), MCESEV( KEEP, "Non signalled machine check", SER, BITCLR(MCI_STATUS_S) ), MCESEV( PANIC, "Action required with lost events", SER, BITSET(MCI_STATUS_OVER|MCI_UC_SAR) ), /* known AR MCACODs: */ #ifdef CONFIG_MEMORY_FAILURE MCESEV( KEEP, "HT thread notices Action required: data load error", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), MCGMASK(MCG_STATUS_EIPV, 0) ), MCESEV( AR, "Action required: data load error", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), USER ), #endif MCESEV( PANIC, "Action required: unknown MCACOD", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR) ), /* known AO MCACODs: */ MCESEV( AO, "Action optional: memory scrubbing error", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB) ), MCESEV( AO, "Action optional: last level cache writeback error", 
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB) ), MCESEV( SOME, "Action optional: unknown MCACOD", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S) ), MCESEV( SOME, "Action optional with lost events", SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_OVER|MCI_UC_S) ), MCESEV( PANIC, "Overflowed uncorrected", BITSET(MCI_STATUS_OVER|MCI_STATUS_UC) ), MCESEV( UC, "Uncorrected", BITSET(MCI_STATUS_UC) ), MCESEV( SOME, "No match", BITSET(0) ) /* always matches. keep at end */ }; /* * If mcgstatus indicated that ip/cs on the stack were * no good, then "m->cs" will be zero and we will have * to assume the worst case (IN_KERNEL) as we actually * have no idea what we were executing when the machine * check hit. * If we do have a good "m->cs" (or a faked one in the * case we were executing in VM86 mode) we can use it to * distinguish an exception taken in user from from one * taken in the kernel. */ static int error_context(struct mce *m) { return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL; } int mce_severity(struct mce *m, int tolerant, char **msg) { enum context ctx = error_context(m); struct severity *s; for (s = severities;; s++) { if ((m->status & s->mask) != s->result) continue; if ((m->mcgstatus & s->mcgmask) != s->mcgres) continue; if (s->ser == SER_REQUIRED && !mce_ser) continue; if (s->ser == NO_SER && mce_ser) continue; if (s->context && ctx != s->context) continue; if (msg) *msg = s->msg; s->covered = 1; if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) { if (panic_on_oops || tolerant < 1) return MCE_PANIC_SEVERITY; } return s->sev; } } #ifdef CONFIG_DEBUG_FS static void *s_start(struct seq_file *f, loff_t *pos) { if (*pos >= ARRAY_SIZE(severities)) return NULL; return &severities[*pos]; } static void *s_next(struct seq_file *f, void *data, loff_t *pos) { if (++(*pos) >= ARRAY_SIZE(severities)) return NULL; return &severities[*pos]; } static void s_stop(struct seq_file *f, void *data) { } static int s_show(struct seq_file *f, void *data) { struct 
severity *ser = data; seq_printf(f, "%d\t%s\n", ser->covered, ser->msg); return 0; } static const struct seq_operations severities_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static int severities_coverage_open(struct inode *inode, struct file *file) { return seq_open(file, &severities_seq_ops); } static ssize_t severities_coverage_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { int i; for (i = 0; i < ARRAY_SIZE(severities); i++) severities[i].covered = 0; return count; } static const struct file_operations severities_coverage_fops = { .open = severities_coverage_open, .release = seq_release, .read = seq_read, .write = severities_coverage_write, .llseek = seq_lseek, }; static int __init severities_debugfs_init(void) { struct dentry *dmce, *fsev; dmce = mce_get_debugfs_dir(); if (!dmce) goto err_out; fsev = debugfs_create_file("severities-coverage", 0444, dmce, NULL, &severities_coverage_fops); if (!fsev) goto err_out; return 0; err_out: return -ENOMEM; } late_initcall(severities_debugfs_init); #endif /* CONFIG_DEBUG_FS */
gpl-2.0
Xanwar/android_kernel_asus_a400cg
drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
2269
3065
/* * Copyright (C) 2010 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <subdev/fb.h> struct nv10_fb_priv { struct nouveau_fb base; }; static int nv10_fb_vram_init(struct nouveau_fb *pfb) { u32 cfg0 = nv_rd32(pfb, 0x100200); if (cfg0 & 0x00000001) pfb->ram.type = NV_MEM_TYPE_DDR1; else pfb->ram.type = NV_MEM_TYPE_SDRAM; pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000; return 0; } void nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nouveau_fb_tile *tile) { tile->addr = 0x80000000 | addr; tile->limit = max(1u, addr + size) - 1; tile->pitch = pitch; } void nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { tile->addr = 0; tile->limit = 0; tile->pitch = 0; tile->zcomp = 0; } void nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) { nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); nv_rd32(pfb, 0x100240 + (i * 0x10)); } static int nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv10_fb_priv *priv; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; priv->base.memtype_valid = nv04_fb_memtype_valid; priv->base.ram.init = nv10_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv10_fb_tile_init; priv->base.tile.fini = nv10_fb_tile_fini; priv->base.tile.prog = nv10_fb_tile_prog; return nouveau_fb_preinit(&priv->base); } struct nouveau_oclass nv10_fb_oclass = { .handle = NV_SUBDEV(FB, 0x10), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv10_fb_ctor, .dtor = _nouveau_fb_dtor, .init = _nouveau_fb_init, .fini = _nouveau_fb_fini, }, };
gpl-2.0
danil39/Alto45
crypto/cast5_generic.c
2269
21338
/* Kernel cryptographic api. * cast5.c - Cast5 cipher algorithm (rfc2144). * * Derived from GnuPG implementation of cast5. * * Major Changes. * Complete conformance to rfc2144. * Supports key size from 40 to 128 bits. * * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc. * Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>. * * This program is free software; you can redistribute it and/or modify it * under the terms of GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include <asm/byteorder.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <crypto/cast5.h> static const u32 s5[256] = { 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 
0x29de0655, 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 
0xeeb9491d, 0x34010718, 0xbb30cab8, 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4 }; static const u32 s6[256] = { 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, 0x7dede786, 
0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f }; static const u32 s7[256] = { 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 
0x4437f107, 0xb6e79962, 0x42d2d816, 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566, 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, 0x1814386b, 0x30bcc33d, 0x38a0c07d, 
0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3 }; static const u32 sb8[256] = { 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 
0x151682eb, 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4, 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e }; #define s1 cast_s1 #define s2 cast_s2 #define s3 cast_s3 #define s4 cast_s4 #define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) #define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) #define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { 
const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; /* used by the Fx macros */ u32 *Km; u8 *Kr; Km = c->Km; Kr = c->Kr; /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) */ l = be32_to_cpu(src[0]); r = be32_to_cpu(src[1]); /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: * Li = Ri-1; * Ri = Li-1 ^ f(Ri-1,Kmi,Kri), where f is defined in Section 2.2 * Rounds 1, 4, 7, 10, 13, and 16 use f function Type 1. * Rounds 2, 5, 8, 11, and 14 use f function Type 2. * Rounds 3, 6, 9, 12, and 15 use f function Type 3. */ t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); } /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and * concatenate to form the ciphertext.) 
*/ dst[0] = cpu_to_be32(r); dst[1] = cpu_to_be32(l); } EXPORT_SYMBOL_GPL(__cast5_encrypt); static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast5_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 l, r, t; u32 I; u32 *Km; u8 *Kr; Km = c->Km; Kr = c->Kr; l = be32_to_cpu(src[0]); r = be32_to_cpu(src[1]); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); } t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); dst[0] = cpu_to_be32(r); dst[1] = cpu_to_be32(l); } EXPORT_SYMBOL_GPL(__cast5_decrypt); static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast5_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } static void key_schedule(u32 *x, u32 *z, u32 *k) { #define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff) #define zi(i) ((z[(i)/4] >> (8*(3-((i)%4)))) & 0xff) z[0] = x[0] ^ s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^ s7[xi(8)]; z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^ sb8[xi(10)]; z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s5[xi(9)]; z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^ s6[xi(11)]; k[0] = s5[zi(8)] ^ s6[zi(9)] ^ s7[zi(7)] ^ sb8[zi(6)] ^ s5[zi(2)]; k[1] = s5[zi(10)] ^ s6[zi(11)] ^ s7[zi(5)] 
^ sb8[zi(4)] ^ s6[zi(6)]; k[2] = s5[zi(12)] ^ s6[zi(13)] ^ s7[zi(3)] ^ sb8[zi(2)] ^ s7[zi(9)]; k[3] = s5[zi(14)] ^ s6[zi(15)] ^ s7[zi(1)] ^ sb8[zi(0)] ^ sb8[zi(12)]; x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^ s7[zi(0)]; x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^ sb8[zi(2)]; x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s5[zi(1)]; x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^ s6[zi(3)]; k[4] = s5[xi(3)] ^ s6[xi(2)] ^ s7[xi(12)] ^ sb8[xi(13)] ^ s5[xi(8)]; k[5] = s5[xi(1)] ^ s6[xi(0)] ^ s7[xi(14)] ^ sb8[xi(15)] ^ s6[xi(13)]; k[6] = s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(8)] ^ sb8[xi(9)] ^ s7[xi(3)]; k[7] = s5[xi(5)] ^ s6[xi(4)] ^ s7[xi(10)] ^ sb8[xi(11)] ^ sb8[xi(7)]; z[0] = x[0] ^ s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^ s7[xi(8)]; z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^ sb8[xi(10)]; z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s5[xi(9)]; z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^ s6[xi(11)]; k[8] = s5[zi(3)] ^ s6[zi(2)] ^ s7[zi(12)] ^ sb8[zi(13)] ^ s5[zi(9)]; k[9] = s5[zi(1)] ^ s6[zi(0)] ^ s7[zi(14)] ^ sb8[zi(15)] ^ s6[zi(12)]; k[10] = s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(8)] ^ sb8[zi(9)] ^ s7[zi(2)]; k[11] = s5[zi(5)] ^ s6[zi(4)] ^ s7[zi(10)] ^ sb8[zi(11)] ^ sb8[zi(6)]; x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^ s7[zi(0)]; x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^ sb8[zi(2)]; x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s5[zi(1)]; x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^ s6[zi(3)]; k[12] = s5[xi(8)] ^ s6[xi(9)] ^ s7[xi(7)] ^ sb8[xi(6)] ^ s5[xi(3)]; k[13] = s5[xi(10)] ^ s6[xi(11)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s6[xi(7)]; k[14] = s5[xi(12)] ^ s6[xi(13)] ^ s7[xi(3)] ^ sb8[xi(2)] ^ s7[xi(8)]; k[15] = s5[xi(14)] ^ s6[xi(15)] ^ s7[xi(1)] ^ sb8[xi(0)] ^ sb8[xi(13)]; #undef xi #undef zi } int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int 
key_len) { struct cast5_ctx *c = crypto_tfm_ctx(tfm); int i; u32 x[4]; u32 z[4]; u32 k[16]; __be32 p_key[4]; c->rr = key_len <= 10 ? 1 : 0; memset(p_key, 0, 16); memcpy(p_key, key, key_len); x[0] = be32_to_cpu(p_key[0]); x[1] = be32_to_cpu(p_key[1]); x[2] = be32_to_cpu(p_key[2]); x[3] = be32_to_cpu(p_key[3]); key_schedule(x, z, k); for (i = 0; i < 16; i++) c->Km[i] = k[i]; key_schedule(x, z, k); for (i = 0; i < 16; i++) c->Kr[i] = k[i] & 0x1f; return 0; } EXPORT_SYMBOL_GPL(cast5_setkey); static struct crypto_alg alg = { .cra_name = "cast5", .cra_driver_name = "cast5-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST5_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast5_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAST5_MIN_KEY_SIZE, .cia_max_keysize = CAST5_MAX_KEY_SIZE, .cia_setkey = cast5_setkey, .cia_encrypt = cast5_encrypt, .cia_decrypt = cast5_decrypt } } }; static int __init cast5_mod_init(void) { return crypto_register_alg(&alg); } static void __exit cast5_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(cast5_mod_init); module_exit(cast5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast5 Cipher Algorithm"); MODULE_ALIAS("cast5");
gpl-2.0
MoKee/android_kernel_zte_nx511j
drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
2269
2662
/* * Copyright (C) 2010 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <subdev/fb.h> struct nv25_fb_priv { struct nouveau_fb base; }; static void nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags, struct nouveau_fb_tile *tile) { u32 tiles = DIV_ROUND_UP(size, 0x40); u32 tags = round_up(tiles / pfb->ram.parts, 0x40); if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) { if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */ else tile->zcomp = 0x00200000; /* Z24S8 */ tile->zcomp |= tile->tag->offset; #ifdef __BIG_ENDIAN tile->zcomp |= 0x01000000; #endif } } static int nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv25_fb_priv *priv; int ret; ret = nouveau_fb_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; priv->base.memtype_valid = nv04_fb_memtype_valid; priv->base.ram.init = nv20_fb_vram_init; priv->base.tile.regions = 8; priv->base.tile.init = nv20_fb_tile_init; priv->base.tile.comp = nv25_fb_tile_comp; priv->base.tile.fini = nv20_fb_tile_fini; priv->base.tile.prog = nv20_fb_tile_prog; return nouveau_fb_preinit(&priv->base); } struct nouveau_oclass nv25_fb_oclass = { .handle = NV_SUBDEV(FB, 0x25), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv25_fb_ctor, .dtor = _nouveau_fb_dtor, .init = _nouveau_fb_init, .fini = _nouveau_fb_fini, }, };
gpl-2.0
nikhil16242/Prometheus_kernel_golf
net/8021q/vlanproc.c
2525
8741
/****************************************************************************** * vlanproc.c VLAN Module. /proc filesystem interface. * * This module is completely hardware-independent and provides * access to the router using Linux /proc filesystem. * * Author: Ben Greear, <greearb@candelatech.com> coppied from wanproc.c * by: Gene Kozin <genek@compuserve.com> * * Copyright: (c) 1998 Ben Greear * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * ============================================================================ * Jan 20, 1998 Ben Greear Initial Version *****************************************************************************/ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "vlanproc.h" #include "vlan.h" /****** Function Prototypes *************************************************/ /* Methods for preparing data for reading proc entries */ static int vlan_seq_show(struct seq_file *seq, void *v); static void *vlan_seq_start(struct seq_file *seq, loff_t *pos); static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos); static void vlan_seq_stop(struct seq_file *seq, void *); static int vlandev_seq_show(struct seq_file *seq, void *v); /* * Global Data */ /* * Names of the proc directory entries */ static const char name_root[] = "vlan"; static const char name_conf[] = "config"; /* * Structures for interfacing with the /proc filesystem. 
* VLAN creates its own directory /proc/net/vlan with the following * entries: * config device status/configuration * <device> entry for each device */ /* * Generic /proc/net/vlan/<file> file and inode operations */ static const struct seq_operations vlan_seq_ops = { .start = vlan_seq_start, .next = vlan_seq_next, .stop = vlan_seq_stop, .show = vlan_seq_show, }; static int vlan_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &vlan_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations vlan_fops = { .owner = THIS_MODULE, .open = vlan_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; /* * /proc/net/vlan/<device> file and inode operations */ static int vlandev_seq_open(struct inode *inode, struct file *file) { return single_open(file, vlandev_seq_show, PDE(inode)->data); } static const struct file_operations vlandev_fops = { .owner = THIS_MODULE, .open = vlandev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * Proc filesystem derectory entries. */ /* Strings */ static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = { [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID", [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID", }; /* * Interface functions */ /* * Clean up /proc/net/vlan entries */ void vlan_proc_cleanup(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); if (vn->proc_vlan_conf) remove_proc_entry(name_conf, vn->proc_vlan_dir); if (vn->proc_vlan_dir) proc_net_remove(net, name_root); /* Dynamically added entries should be cleaned up as their vlan_device * is removed, so we should not have to take care of it here... 
*/ } /* * Create /proc/net/vlan entries */ int __net_init vlan_proc_init(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net); if (!vn->proc_vlan_dir) goto err; vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR, vn->proc_vlan_dir, &vlan_fops); if (!vn->proc_vlan_conf) goto err; return 0; err: pr_err("%s: can't create entry in proc filesystem!\n", __func__); vlan_proc_cleanup(net); return -ENOBUFS; } /* * Add directory entry for VLAN device. */ int vlan_proc_add_dev(struct net_device *vlandev) { struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); dev_info->dent = proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR, vn->proc_vlan_dir, &vlandev_fops, vlandev); if (!dev_info->dent) return -ENOBUFS; return 0; } /* * Delete directory entry for VLAN device. */ int vlan_proc_rem_dev(struct net_device *vlandev) { struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); /** NOTE: This will consume the memory pointed to by dent, it seems. 
*/ if (vlan_dev_info(vlandev)->dent) { remove_proc_entry(vlan_dev_info(vlandev)->dent->name, vn->proc_vlan_dir); vlan_dev_info(vlandev)->dent = NULL; } return 0; } /****** Proc filesystem entry points ****************************************/ /* * The following few functions build the content of /proc/net/vlan/config */ /* start read of /proc/net/vlan/config */ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { struct net_device *dev; struct net *net = seq_file_net(seq); loff_t i = 1; rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; for_each_netdev_rcu(net, dev) { if (!is_vlan_dev(dev)) continue; if (i++ == *pos) return dev; } return NULL; } static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev; struct net *net = seq_file_net(seq); ++*pos; dev = (struct net_device *)v; if (v == SEQ_START_TOKEN) dev = net_device_entry(&net->dev_base_head); for_each_netdev_continue_rcu(net, dev) { if (!is_vlan_dev(dev)) continue; return dev; } return NULL; } static void vlan_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { rcu_read_unlock(); } static int vlan_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); struct vlan_net *vn = net_generic(net, vlan_net_id); if (v == SEQ_START_TOKEN) { const char *nmtype = NULL; seq_puts(seq, "VLAN Dev name | VLAN ID\n"); if (vn->name_type < ARRAY_SIZE(vlan_name_type_str)) nmtype = vlan_name_type_str[vn->name_type]; seq_printf(seq, "Name-Type: %s\n", nmtype ? 
nmtype : "UNKNOWN"); } else { const struct net_device *vlandev = v; const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, dev_info->vlan_id, dev_info->real_dev->name); } return 0; } static int vlandev_seq_show(struct seq_file *seq, void *offset) { struct net_device *vlandev = (struct net_device *) seq->private; const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; static const char fmt64[] = "%30s %12llu\n"; int i; if (!is_vlan_dev(vlandev)) return 0; stats = dev_get_stats(vlandev, &temp); seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", vlandev->name, dev_info->vlan_id, (int)(dev_info->flags & 1), vlandev->priv_flags); seq_printf(seq, fmt64, "total frames received", stats->rx_packets); seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); seq_puts(seq, "\n"); seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); seq_printf(seq, "Device: %s", dev_info->real_dev->name); /* now show all PRIORITY mappings relating to this VLAN */ seq_printf(seq, "\nINGRESS priority mappings: " "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", dev_info->ingress_priority_map[0], dev_info->ingress_priority_map[1], dev_info->ingress_priority_map[2], dev_info->ingress_priority_map[3], dev_info->ingress_priority_map[4], dev_info->ingress_priority_map[5], dev_info->ingress_priority_map[6], dev_info->ingress_priority_map[7]); seq_printf(seq, " EGRESS priority mappings: "); for (i = 0; i < 16; i++) { const struct vlan_priority_tci_mapping *mp = dev_info->egress_priority_map[i]; while (mp) { seq_printf(seq, "%u:%hu ", mp->priority, ((mp->vlan_qos >> 13) & 0x7)); mp = mp->next; } } seq_puts(seq, "\n"); return 0; }
gpl-2.0
assusdan/cyanogenmod_kernel_hs_puref
drivers/leds/trigger/ledtrig-gpio.c
2781
6267
/* * ledtrig-gio.c - LED Trigger Based on GPIO events * * Copyright 2009 Felipe Balbi <me@felipebalbi.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/leds.h> #include <linux/slab.h> #include "../leds.h" struct gpio_trig_data { struct led_classdev *led; struct work_struct work; unsigned desired_brightness; /* desired brightness when led is on */ unsigned inverted; /* true when gpio is inverted */ unsigned gpio; /* gpio that triggers the leds */ }; static irqreturn_t gpio_trig_irq(int irq, void *_led) { struct led_classdev *led = _led; struct gpio_trig_data *gpio_data = led->trigger_data; /* just schedule_work since gpio_get_value can sleep */ schedule_work(&gpio_data->work); return IRQ_HANDLED; }; static void gpio_trig_work(struct work_struct *work) { struct gpio_trig_data *gpio_data = container_of(work, struct gpio_trig_data, work); int tmp; if (!gpio_data->gpio) return; tmp = gpio_get_value(gpio_data->gpio); if (gpio_data->inverted) tmp = !tmp; if (tmp) { if (gpio_data->desired_brightness) __led_set_brightness(gpio_data->led, gpio_data->desired_brightness); else __led_set_brightness(gpio_data->led, LED_FULL); } else { __led_set_brightness(gpio_data->led, LED_OFF); } } static ssize_t gpio_trig_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led = dev_get_drvdata(dev); struct gpio_trig_data *gpio_data = led->trigger_data; return sprintf(buf, "%u\n", gpio_data->desired_brightness); } static ssize_t gpio_trig_brightness_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct led_classdev *led = dev_get_drvdata(dev); struct gpio_trig_data *gpio_data = 
led->trigger_data; unsigned desired_brightness; int ret; ret = sscanf(buf, "%u", &desired_brightness); if (ret < 1 || desired_brightness > 255) { dev_err(dev, "invalid value\n"); return -EINVAL; } gpio_data->desired_brightness = desired_brightness; return n; } static DEVICE_ATTR(desired_brightness, 0644, gpio_trig_brightness_show, gpio_trig_brightness_store); static ssize_t gpio_trig_inverted_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led = dev_get_drvdata(dev); struct gpio_trig_data *gpio_data = led->trigger_data; return sprintf(buf, "%u\n", gpio_data->inverted); } static ssize_t gpio_trig_inverted_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct led_classdev *led = dev_get_drvdata(dev); struct gpio_trig_data *gpio_data = led->trigger_data; unsigned long inverted; int ret; ret = kstrtoul(buf, 10, &inverted); if (ret < 0) return ret; if (inverted > 1) return -EINVAL; gpio_data->inverted = inverted; /* After inverting, we need to update the LED. 
*/ schedule_work(&gpio_data->work); return n; } static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show, gpio_trig_inverted_store); static ssize_t gpio_trig_gpio_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led = dev_get_drvdata(dev); struct gpio_trig_data *gpio_data = led->trigger_data; return sprintf(buf, "%u\n", gpio_data->gpio); } static ssize_t gpio_trig_gpio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct led_classdev *led = dev_get_drvdata(dev); struct gpio_trig_data *gpio_data = led->trigger_data; unsigned gpio; int ret; ret = sscanf(buf, "%u", &gpio); if (ret < 1) { dev_err(dev, "couldn't read gpio number\n"); flush_work(&gpio_data->work); return -EINVAL; } if (gpio_data->gpio == gpio) return n; if (!gpio) { if (gpio_data->gpio != 0) free_irq(gpio_to_irq(gpio_data->gpio), led); gpio_data->gpio = 0; return n; } ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq, IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "ledtrig-gpio", led); if (ret) { dev_err(dev, "request_irq failed with error %d\n", ret); } else { if (gpio_data->gpio != 0) free_irq(gpio_to_irq(gpio_data->gpio), led); gpio_data->gpio = gpio; } return ret ? 
ret : n; } static DEVICE_ATTR(gpio, 0644, gpio_trig_gpio_show, gpio_trig_gpio_store); static void gpio_trig_activate(struct led_classdev *led) { struct gpio_trig_data *gpio_data; int ret; gpio_data = kzalloc(sizeof(*gpio_data), GFP_KERNEL); if (!gpio_data) return; ret = device_create_file(led->dev, &dev_attr_gpio); if (ret) goto err_gpio; ret = device_create_file(led->dev, &dev_attr_inverted); if (ret) goto err_inverted; ret = device_create_file(led->dev, &dev_attr_desired_brightness); if (ret) goto err_brightness; gpio_data->led = led; led->trigger_data = gpio_data; INIT_WORK(&gpio_data->work, gpio_trig_work); led->activated = true; return; err_brightness: device_remove_file(led->dev, &dev_attr_inverted); err_inverted: device_remove_file(led->dev, &dev_attr_gpio); err_gpio: kfree(gpio_data); } static void gpio_trig_deactivate(struct led_classdev *led) { struct gpio_trig_data *gpio_data = led->trigger_data; if (led->activated) { device_remove_file(led->dev, &dev_attr_gpio); device_remove_file(led->dev, &dev_attr_inverted); device_remove_file(led->dev, &dev_attr_desired_brightness); flush_work(&gpio_data->work); if (gpio_data->gpio != 0) free_irq(gpio_to_irq(gpio_data->gpio), led); kfree(gpio_data); led->activated = false; } } static struct led_trigger gpio_led_trigger = { .name = "gpio", .activate = gpio_trig_activate, .deactivate = gpio_trig_deactivate, }; static int __init gpio_trig_init(void) { return led_trigger_register(&gpio_led_trigger); } module_init(gpio_trig_init); static void __exit gpio_trig_exit(void) { led_trigger_unregister(&gpio_led_trigger); } module_exit(gpio_trig_exit); MODULE_AUTHOR("Felipe Balbi <me@felipebalbi.com>"); MODULE_DESCRIPTION("GPIO LED trigger"); MODULE_LICENSE("GPL");
gpl-2.0
cmartinbaughman/shooter-ics-sense
drivers/scsi/mesh.c
3037
53931
/* * SCSI low-level driver for the MESH (Macintosh Enhanced SCSI Hardware) * bus adaptor found on Power Macintosh computers. * We assume the MESH is connected to a DBDMA (descriptor-based DMA) * controller. * * Paul Mackerras, August 1996. * Copyright (C) 1996 Paul Mackerras. * * Apr. 21 2002 - BenH Rework bus reset code for new error handler * Add delay after initial bus reset * Add module parameters * * Sep. 27 2003 - BenH Move to new driver model, fix some write posting * issues * To do: * - handle aborts correctly * - retry arbitration if lost (unless higher levels do this for us) * - power down the chip when no device is detected */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/interrupt.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/system.h> #include <asm/irq.h> #include <asm/hydra.h> #include <asm/processor.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> #include <asm/macio.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "mesh.h" #if 1 #undef KERN_DEBUG #define KERN_DEBUG KERN_WARNING #endif MODULE_AUTHOR("Paul Mackerras (paulus@samba.org)"); MODULE_DESCRIPTION("PowerMac MESH SCSI driver"); MODULE_LICENSE("GPL"); static int sync_rate = CONFIG_SCSI_MESH_SYNC_RATE; static int sync_targets = 0xff; static int resel_targets = 0xff; static int debug_targets = 0; /* print debug for these targets */ static int init_reset_delay = CONFIG_SCSI_MESH_RESET_DELAY_MS; module_param(sync_rate, int, 0); MODULE_PARM_DESC(sync_rate, "Synchronous rate (0..10, 0=async)"); module_param(sync_targets, int, 0); MODULE_PARM_DESC(sync_targets, "Bitmask of targets allowed to set synchronous"); 
module_param(resel_targets, int, 0); MODULE_PARM_DESC(resel_targets, "Bitmask of targets allowed to set disconnect"); module_param(debug_targets, int, 0644); MODULE_PARM_DESC(debug_targets, "Bitmask of debugged targets"); module_param(init_reset_delay, int, 0); MODULE_PARM_DESC(init_reset_delay, "Initial bus reset delay (0=no reset)"); static int mesh_sync_period = 100; static int mesh_sync_offset = 0; static unsigned char use_active_neg = 0; /* bit mask for SEQ_ACTIVE_NEG if used */ #define ALLOW_SYNC(tgt) ((sync_targets >> (tgt)) & 1) #define ALLOW_RESEL(tgt) ((resel_targets >> (tgt)) & 1) #define ALLOW_DEBUG(tgt) ((debug_targets >> (tgt)) & 1) #define DEBUG_TARGET(cmd) ((cmd) && ALLOW_DEBUG((cmd)->device->id)) #undef MESH_DBG #define N_DBG_LOG 50 #define N_DBG_SLOG 20 #define NUM_DBG_EVENTS 13 #undef DBG_USE_TB /* bombs on 601 */ struct dbglog { char *fmt; u32 tb; u8 phase; u8 bs0; u8 bs1; u8 tgt; int d; }; enum mesh_phase { idle, arbitrating, selecting, commanding, dataing, statusing, busfreeing, disconnecting, reselecting, sleeping }; enum msg_phase { msg_none, msg_out, msg_out_xxx, msg_out_last, msg_in, msg_in_bad, }; enum sdtr_phase { do_sdtr, sdtr_sent, sdtr_done }; struct mesh_target { enum sdtr_phase sdtr_state; int sync_params; int data_goes_out; /* guess as to data direction */ struct scsi_cmnd *current_req; u32 saved_ptr; #ifdef MESH_DBG int log_ix; int n_log; struct dbglog log[N_DBG_LOG]; #endif }; struct mesh_state { volatile struct mesh_regs __iomem *mesh; int meshintr; volatile struct dbdma_regs __iomem *dma; int dmaintr; struct Scsi_Host *host; struct mesh_state *next; struct scsi_cmnd *request_q; struct scsi_cmnd *request_qtail; enum mesh_phase phase; /* what we're currently trying to do */ enum msg_phase msgphase; int conn_tgt; /* target we're connected to */ struct scsi_cmnd *current_req; /* req we're currently working on */ int data_ptr; int dma_started; int dma_count; int stat; int aborting; int expect_reply; int n_msgin; u8 msgin[16]; int 
n_msgout; int last_n_msgout; u8 msgout[16]; struct dbdma_cmd *dma_cmds; /* space for dbdma commands, aligned */ dma_addr_t dma_cmd_bus; void *dma_cmd_space; int dma_cmd_size; int clk_freq; struct mesh_target tgts[8]; struct macio_dev *mdev; struct pci_dev* pdev; #ifdef MESH_DBG int log_ix; int n_log; struct dbglog log[N_DBG_SLOG]; #endif }; /* * Driver is too messy, we need a few prototypes... */ static void mesh_done(struct mesh_state *ms, int start_next); static void mesh_interrupt(struct mesh_state *ms); static void cmd_complete(struct mesh_state *ms); static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd); static void halt_dma(struct mesh_state *ms); static void phase_mismatch(struct mesh_state *ms); /* * Some debugging & logging routines */ #ifdef MESH_DBG static inline u32 readtb(void) { u32 tb; #ifdef DBG_USE_TB /* Beware: if you enable this, it will crash on 601s. */ asm ("mftb %0" : "=r" (tb) : ); #else tb = 0; #endif return tb; } static void dlog(struct mesh_state *ms, char *fmt, int a) { struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; struct dbglog *tlp, *slp; tlp = &tp->log[tp->log_ix]; slp = &ms->log[ms->log_ix]; tlp->fmt = fmt; tlp->tb = readtb(); tlp->phase = (ms->msgphase << 4) + ms->phase; tlp->bs0 = ms->mesh->bus_status0; tlp->bs1 = ms->mesh->bus_status1; tlp->tgt = ms->conn_tgt; tlp->d = a; *slp = *tlp; if (++tp->log_ix >= N_DBG_LOG) tp->log_ix = 0; if (tp->n_log < N_DBG_LOG) ++tp->n_log; if (++ms->log_ix >= N_DBG_SLOG) ms->log_ix = 0; if (ms->n_log < N_DBG_SLOG) ++ms->n_log; } static void dumplog(struct mesh_state *ms, int t) { struct mesh_target *tp = &ms->tgts[t]; struct dbglog *lp; int i; if (tp->n_log == 0) return; i = tp->log_ix - tp->n_log; if (i < 0) i += N_DBG_LOG; tp->n_log = 0; do { lp = &tp->log[i]; printk(KERN_DEBUG "mesh log %d: bs=%.2x%.2x ph=%.2x ", t, lp->bs1, lp->bs0, lp->phase); #ifdef DBG_USE_TB printk("tb=%10u ", lp->tb); #endif printk(lp->fmt, lp->d); printk("\n"); if (++i >= N_DBG_LOG) i = 0; } while (i != 
tp->log_ix); } static void dumpslog(struct mesh_state *ms) { struct dbglog *lp; int i; if (ms->n_log == 0) return; i = ms->log_ix - ms->n_log; if (i < 0) i += N_DBG_SLOG; ms->n_log = 0; do { lp = &ms->log[i]; printk(KERN_DEBUG "mesh log: bs=%.2x%.2x ph=%.2x t%d ", lp->bs1, lp->bs0, lp->phase, lp->tgt); #ifdef DBG_USE_TB printk("tb=%10u ", lp->tb); #endif printk(lp->fmt, lp->d); printk("\n"); if (++i >= N_DBG_SLOG) i = 0; } while (i != ms->log_ix); } #else static inline void dlog(struct mesh_state *ms, char *fmt, int a) {} static inline void dumplog(struct mesh_state *ms, int tgt) {} static inline void dumpslog(struct mesh_state *ms) {} #endif /* MESH_DBG */ #define MKWORD(a, b, c, d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) static void mesh_dump_regs(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; int t; struct mesh_target *tp; printk(KERN_DEBUG "mesh: state at %p, regs at %p, dma at %p\n", ms, mr, md); printk(KERN_DEBUG " ct=%4x seq=%2x bs=%4x fc=%2x " "exc=%2x err=%2x im=%2x int=%2x sp=%2x\n", (mr->count_hi << 8) + mr->count_lo, mr->sequence, (mr->bus_status1 << 8) + mr->bus_status0, mr->fifo_count, mr->exception, mr->error, mr->intr_mask, mr->interrupt, mr->sync_params); while(in_8(&mr->fifo_count)) printk(KERN_DEBUG " fifo data=%.2x\n",in_8(&mr->fifo)); printk(KERN_DEBUG " dma stat=%x cmdptr=%x\n", in_le32(&md->status), in_le32(&md->cmdptr)); printk(KERN_DEBUG " phase=%d msgphase=%d conn_tgt=%d data_ptr=%d\n", ms->phase, ms->msgphase, ms->conn_tgt, ms->data_ptr); printk(KERN_DEBUG " dma_st=%d dma_ct=%d n_msgout=%d\n", ms->dma_started, ms->dma_count, ms->n_msgout); for (t = 0; t < 8; ++t) { tp = &ms->tgts[t]; if (tp->current_req == NULL) continue; printk(KERN_DEBUG " target %d: req=%p goes_out=%d saved_ptr=%d\n", t, tp->current_req, tp->data_goes_out, tp->saved_ptr); } } /* * Flush write buffers on the bus path to the mesh */ static inline void mesh_flush_io(volatile struct 
mesh_regs __iomem *mr) { (void)in_8(&mr->mesh_id); } /* * Complete a SCSI command */ static void mesh_completed(struct mesh_state *ms, struct scsi_cmnd *cmd) { (*cmd->scsi_done)(cmd); } /* Called with meshinterrupt disabled, initialize the chipset * and eventually do the initial bus reset. The lock must not be * held since we can schedule. */ static void mesh_init(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; mesh_flush_io(mr); udelay(100); /* Reset controller */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ out_8(&mr->error, 0xff); /* clear all error bits */ out_8(&mr->sequence, SEQ_RESETMESH); mesh_flush_io(mr); udelay(10); out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->source_id, ms->host->this_id); out_8(&mr->sel_timeout, 25); /* 250ms */ out_8(&mr->sync_params, ASYNC_PARAMS); if (init_reset_delay) { printk(KERN_INFO "mesh: performing initial bus reset...\n"); /* Reset bus */ out_8(&mr->bus_status1, BS1_RST); /* assert RST */ mesh_flush_io(mr); udelay(30); /* leave it on for >= 25us */ out_8(&mr->bus_status1, 0); /* negate RST */ mesh_flush_io(mr); /* Wait for bus to come back */ msleep(init_reset_delay); } /* Reconfigure controller */ out_8(&mr->interrupt, 0xff); /* clear all interrupt bits */ out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); out_8(&mr->sequence, SEQ_ENBRESEL); ms->phase = idle; ms->msgphase = msg_none; } static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd) { volatile struct mesh_regs __iomem *mr = ms->mesh; int t, id; id = cmd->device->id; ms->current_req = cmd; ms->tgts[id].data_goes_out = cmd->sc_data_direction == DMA_TO_DEVICE; ms->tgts[id].current_req = cmd; #if 1 if (DEBUG_TARGET(cmd)) { int i; printk(KERN_DEBUG "mesh_start: %p tgt=%d cmd=", cmd, id); for (i = 0; i < 
cmd->cmd_len; ++i) printk(" %x", cmd->cmnd[i]); printk(" use_sg=%d buffer=%p bufflen=%u\n", scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd)); } #endif if (ms->dma_started) panic("mesh: double DMA start !\n"); ms->phase = arbitrating; ms->msgphase = msg_none; ms->data_ptr = 0; ms->dma_started = 0; ms->n_msgout = 0; ms->last_n_msgout = 0; ms->expect_reply = 0; ms->conn_tgt = id; ms->tgts[id].saved_ptr = 0; ms->stat = DID_OK; ms->aborting = 0; #ifdef MESH_DBG ms->tgts[id].n_log = 0; dlog(ms, "start cmd=%x", (int) cmd); #endif /* Off we go */ dlog(ms, "about to arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); out_8(&mr->interrupt, INT_CMDDONE); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) { /* * Some other device has the bus or is arbitrating for it - * probably a target which is about to reselect us. */ dlog(ms, "busy b4 arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); for (t = 100; t > 0; --t) { if ((in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) == 0) break; if (in_8(&mr->interrupt) != 0) { dlog(ms, "intr b4 arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); mesh_interrupt(ms); if (ms->phase != arbitrating) return; } udelay(1); } if (in_8(&mr->bus_status1) & (BS1_BSY | BS1_SEL)) { /* XXX should try again in a little while */ ms->stat = DID_BUS_BUSY; ms->phase = idle; mesh_done(ms, 0); return; } } /* * Apparently the mesh has a bug where it will assert both its * own bit and the target's bit on the bus during arbitration. */ out_8(&mr->dest_id, mr->source_id); /* * There appears to be a race with reselection sometimes, * where a target reselects us just as we issue the * arbitrate command. It seems that then the arbitrate * command just hangs waiting for the bus to be free * without giving us a reselection exception. 
* The only way I have found to get it to respond correctly * is this: disable reselection before issuing the arbitrate * command, then after issuing it, if it looks like a target * is trying to reselect us, reset the mesh and then enable * reselection. */ out_8(&mr->sequence, SEQ_DISRESEL); if (in_8(&mr->interrupt) != 0) { dlog(ms, "intr after disresel, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); mesh_interrupt(ms); if (ms->phase != arbitrating) return; dlog(ms, "after intr after disresel, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); } out_8(&mr->sequence, SEQ_ARBITRATE); for (t = 230; t > 0; --t) { if (in_8(&mr->interrupt) != 0) break; udelay(1); } dlog(ms, "after arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL) && (in_8(&mr->bus_status0) & BS0_IO)) { /* looks like a reselection - try resetting the mesh */ dlog(ms, "resel? after arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); out_8(&mr->sequence, SEQ_RESETMESH); mesh_flush_io(mr); udelay(10); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); for (t = 10; t > 0 && in_8(&mr->interrupt) == 0; --t) udelay(1); dlog(ms, "tried reset after arb, intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); #ifndef MESH_MULTIPLE_HOSTS if (in_8(&mr->interrupt) == 0 && (in_8(&mr->bus_status1) & BS1_SEL) && (in_8(&mr->bus_status0) & BS0_IO)) { printk(KERN_ERR "mesh: controller not responding" " to reselection!\n"); /* * If this is a target reselecting us, and the * mesh isn't responding, the higher levels of * the scsi code will eventually time out and * reset the bus. */ } #endif } } /* * Start the next command for a MESH. 
 * Should be called with interrupts disabled.
 */
static void mesh_start(struct mesh_state *ms)
{
	struct scsi_cmnd *cmd, *prev, *next;

	/* Only legal from the idle phase with no request in flight. */
	if (ms->phase != idle || ms->current_req != NULL) {
		printk(KERN_ERR "inappropriate mesh_start (phase=%d, ms=%p)",
		       ms->phase, ms);
		return;
	}

	while (ms->phase == idle) {
		/*
		 * Scan the request queue for the first command whose target
		 * has nothing outstanding (one command per target at a time);
		 * remember its predecessor so it can be unlinked below.
		 */
		prev = NULL;
		for (cmd = ms->request_q; ; cmd = (struct scsi_cmnd *) cmd->host_scribble) {
			if (cmd == NULL)
				return;
			if (ms->tgts[cmd->device->id].current_req == NULL)
				break;
			prev = cmd;
		}

		/* Unlink cmd from the singly-linked queue (host_scribble = next). */
		next = (struct scsi_cmnd *) cmd->host_scribble;
		if (prev == NULL)
			ms->request_q = next;
		else
			prev->host_scribble = (void *) next;
		if (next == NULL)
			ms->request_qtail = prev;

		mesh_start_cmd(ms, cmd);
	}
}

/*
 * Complete the current command with host status ms->stat and, if
 * start_next is set, re-enable reselection and start the next queued
 * command.
 */
static void mesh_done(struct mesh_state *ms, int start_next)
{
	struct scsi_cmnd *cmd;
	struct mesh_target *tp = &ms->tgts[ms->conn_tgt];

	cmd = ms->current_req;
	ms->current_req = NULL;
	tp->current_req = NULL;
	if (cmd) {
		/* Host byte from ms->stat; SCSI status byte; message on DID_OK. */
		cmd->result = (ms->stat << 16) + cmd->SCp.Status;
		if (ms->stat == DID_OK)
			cmd->result += (cmd->SCp.Message << 8);
		if (DEBUG_TARGET(cmd)) {
			printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
			       cmd->result, ms->data_ptr, scsi_bufflen(cmd));
#if 0
			/* needs to use sg? */
			if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
			    && cmd->request_buffer != 0) {
				unsigned char *b = cmd->request_buffer;
				printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
				       b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
			}
#endif
		}
		cmd->SCp.this_residual -= ms->data_ptr;
		mesh_completed(ms, cmd);
	}
	if (start_next) {
		out_8(&ms->mesh->sequence, SEQ_ENBRESEL);
		mesh_flush_io(ms->mesh);
		udelay(1);
		ms->phase = idle;
		mesh_start(ms);
	}
}

/* Append an SDTR extended message to the outgoing message buffer. */
static inline void add_sdtr_msg(struct mesh_state *ms)
{
	int i = ms->n_msgout;

	ms->msgout[i] = EXTENDED_MESSAGE;
	ms->msgout[i+1] = 3;
	ms->msgout[i+2] = EXTENDED_SDTR;
	ms->msgout[i+3] = mesh_sync_period/4;
	/* Offset 0 (async) unless this target is permitted sync transfers. */
	ms->msgout[i+4] = (ALLOW_SYNC(ms->conn_tgt)?
mesh_sync_offset: 0); ms->n_msgout = i + 5; } static void set_sdtr(struct mesh_state *ms, int period, int offset) { struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; volatile struct mesh_regs __iomem *mr = ms->mesh; int v, tr; tp->sdtr_state = sdtr_done; if (offset == 0) { /* asynchronous */ if (SYNC_OFF(tp->sync_params)) printk(KERN_INFO "mesh: target %d now asynchronous\n", ms->conn_tgt); tp->sync_params = ASYNC_PARAMS; out_8(&mr->sync_params, ASYNC_PARAMS); return; } /* * We need to compute ceil(clk_freq * period / 500e6) - 2 * without incurring overflow. */ v = (ms->clk_freq / 5000) * period; if (v <= 250000) { /* special case: sync_period == 5 * clk_period */ v = 0; /* units of tr are 100kB/s */ tr = (ms->clk_freq + 250000) / 500000; } else { /* sync_period == (v + 2) * 2 * clk_period */ v = (v + 99999) / 100000 - 2; if (v > 15) v = 15; /* oops */ tr = ((ms->clk_freq / (v + 2)) + 199999) / 200000; } if (offset > 15) offset = 15; /* can't happen */ tp->sync_params = SYNC_PARAMS(offset, v); out_8(&mr->sync_params, tp->sync_params); printk(KERN_INFO "mesh: target %d synchronous at %d.%d MB/s\n", ms->conn_tgt, tr/10, tr%10); } static void start_phase(struct mesh_state *ms) { int i, seq, nb; volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; dlog(ms, "start_phase nmo/exc/fc/seq = %.8x", MKWORD(ms->n_msgout, mr->exception, mr->fifo_count, mr->sequence)); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0); switch (ms->msgphase) { case msg_none: break; case msg_in: out_8(&mr->count_hi, 0); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGIN + seq); ms->n_msgin = 0; return; case msg_out: /* * To make sure ATN drops before we assert ACK for * the last byte of the message, we have to do the * last byte specially. 
*/ if (ms->n_msgout <= 0) { printk(KERN_ERR "mesh: msg_out but n_msgout=%d\n", ms->n_msgout); mesh_dump_regs(ms); ms->msgphase = msg_none; break; } if (ALLOW_DEBUG(ms->conn_tgt)) { printk(KERN_DEBUG "mesh: sending %d msg bytes:", ms->n_msgout); for (i = 0; i < ms->n_msgout; ++i) printk(" %x", ms->msgout[i]); printk("\n"); } dlog(ms, "msgout msg=%.8x", MKWORD(ms->n_msgout, ms->msgout[0], ms->msgout[1], ms->msgout[2])); out_8(&mr->count_hi, 0); out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); /* * If ATN is not already asserted, we assert it, then * issue a SEQ_MSGOUT to get the mesh to drop ACK. */ if ((in_8(&mr->bus_status0) & BS0_ATN) == 0) { dlog(ms, "bus0 was %.2x explicitly asserting ATN", mr->bus_status0); out_8(&mr->bus_status0, BS0_ATN); /* explicit ATN */ mesh_flush_io(mr); udelay(1); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + seq); out_8(&mr->bus_status0, 0); /* release explicit ATN */ dlog(ms,"hace: after explicit ATN bus0=%.2x",mr->bus_status0); } if (ms->n_msgout == 1) { /* * We can't issue the SEQ_MSGOUT without ATN * until the target has asserted REQ. The logic * in cmd_complete handles both situations: * REQ already asserted or not. 
*/ cmd_complete(ms); } else { out_8(&mr->count_lo, ms->n_msgout - 1); out_8(&mr->sequence, SEQ_MSGOUT + seq); for (i = 0; i < ms->n_msgout - 1; ++i) out_8(&mr->fifo, ms->msgout[i]); } return; default: printk(KERN_ERR "mesh bug: start_phase msgphase=%d\n", ms->msgphase); } switch (ms->phase) { case selecting: out_8(&mr->dest_id, ms->conn_tgt); out_8(&mr->sequence, SEQ_SELECT + SEQ_ATN); break; case commanding: out_8(&mr->sync_params, tp->sync_params); out_8(&mr->count_hi, 0); if (cmd) { out_8(&mr->count_lo, cmd->cmd_len); out_8(&mr->sequence, SEQ_COMMAND + seq); for (i = 0; i < cmd->cmd_len; ++i) out_8(&mr->fifo, cmd->cmnd[i]); } else { out_8(&mr->count_lo, 6); out_8(&mr->sequence, SEQ_COMMAND + seq); for (i = 0; i < 6; ++i) out_8(&mr->fifo, 0); } break; case dataing: /* transfer data, if any */ if (!ms->dma_started) { set_dma_cmds(ms, cmd); out_le32(&md->cmdptr, virt_to_phys(ms->dma_cmds)); out_le32(&md->control, (RUN << 16) | RUN); ms->dma_started = 1; } nb = ms->dma_count; if (nb > 0xfff0) nb = 0xfff0; ms->dma_count -= nb; ms->data_ptr += nb; out_8(&mr->count_lo, nb); out_8(&mr->count_hi, nb >> 8); out_8(&mr->sequence, (tp->data_goes_out? 
SEQ_DATAOUT: SEQ_DATAIN) + SEQ_DMA_MODE + seq); break; case statusing: out_8(&mr->count_hi, 0); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_STATUS + seq); break; case busfreeing: case disconnecting: out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); dlog(ms, "enbresel intr/exc/err/fc=%.8x", MKWORD(mr->interrupt, mr->exception, mr->error, mr->fifo_count)); out_8(&mr->sequence, SEQ_BUSFREE); break; default: printk(KERN_ERR "mesh: start_phase called with phase=%d\n", ms->phase); dumpslog(ms); } } static inline void get_msgin(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int i, n; n = mr->fifo_count; if (n != 0) { i = ms->n_msgin; ms->n_msgin = i + n; for (; n > 0; --n) ms->msgin[i++] = in_8(&mr->fifo); } } static inline int msgin_length(struct mesh_state *ms) { int b, n; n = 1; if (ms->n_msgin > 0) { b = ms->msgin[0]; if (b == 1) { /* extended message */ n = ms->n_msgin < 2? 2: ms->msgin[1] + 2; } else if (0x20 <= b && b <= 0x2f) { /* 2-byte message */ n = 2; } } return n; } static void reselected(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd; struct mesh_target *tp; int b, t, prev; switch (ms->phase) { case idle: break; case arbitrating: if ((cmd = ms->current_req) != NULL) { /* put the command back on the queue */ cmd->host_scribble = (void *) ms->request_q; if (ms->request_q == NULL) ms->request_qtail = cmd; ms->request_q = cmd; tp = &ms->tgts[cmd->device->id]; tp->current_req = NULL; } break; case busfreeing: ms->phase = reselecting; mesh_done(ms, 0); break; case disconnecting: break; default: printk(KERN_ERR "mesh: reselected in phase %d/%d tgt %d\n", ms->msgphase, ms->phase, ms->conn_tgt); dumplog(ms, ms->conn_tgt); dumpslog(ms); } if (ms->dma_started) { printk(KERN_ERR "mesh: reselected with DMA started !\n"); halt_dma(ms); } ms->current_req = NULL; ms->phase = dataing; ms->msgphase = msg_in; ms->n_msgout = 0; ms->last_n_msgout = 0; prev = ms->conn_tgt; /* * We 
seem to get abortive reselections sometimes. */ while ((in_8(&mr->bus_status1) & BS1_BSY) == 0) { static int mesh_aborted_resels; mesh_aborted_resels++; out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); mesh_flush_io(mr); udelay(1); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(5); dlog(ms, "extra resel err/exc/fc = %.6x", MKWORD(0, mr->error, mr->exception, mr->fifo_count)); } out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); mesh_flush_io(mr); udelay(1); out_8(&mr->sequence, SEQ_ENBRESEL); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); /* * Find out who reselected us. */ if (in_8(&mr->fifo_count) == 0) { printk(KERN_ERR "mesh: reselection but nothing in fifo?\n"); ms->conn_tgt = ms->host->this_id; goto bogus; } /* get the last byte in the fifo */ do { b = in_8(&mr->fifo); dlog(ms, "reseldata %x", b); } while (in_8(&mr->fifo_count)); for (t = 0; t < 8; ++t) if ((b & (1 << t)) != 0 && t != ms->host->this_id) break; if (b != (1 << t) + (1 << ms->host->this_id)) { printk(KERN_ERR "mesh: bad reselection data %x\n", b); ms->conn_tgt = ms->host->this_id; goto bogus; } /* * Set up to continue with that target's transfer. 
*/ ms->conn_tgt = t; tp = &ms->tgts[t]; out_8(&mr->sync_params, tp->sync_params); if (ALLOW_DEBUG(t)) { printk(KERN_DEBUG "mesh: reselected by target %d\n", t); printk(KERN_DEBUG "mesh: saved_ptr=%x goes_out=%d cmd=%p\n", tp->saved_ptr, tp->data_goes_out, tp->current_req); } ms->current_req = tp->current_req; if (tp->current_req == NULL) { printk(KERN_ERR "mesh: reselected by tgt %d but no cmd!\n", t); goto bogus; } ms->data_ptr = tp->saved_ptr; dlog(ms, "resel prev tgt=%d", prev); dlog(ms, "resel err/exc=%.4x", MKWORD(0, 0, mr->error, mr->exception)); start_phase(ms); return; bogus: dumplog(ms, ms->conn_tgt); dumpslog(ms); ms->data_ptr = 0; ms->aborting = 1; start_phase(ms); } static void do_abort(struct mesh_state *ms) { ms->msgout[0] = ABORT; ms->n_msgout = 1; ms->aborting = 1; ms->stat = DID_ABORT; dlog(ms, "abort", 0); } static void handle_reset(struct mesh_state *ms) { int tgt; struct mesh_target *tp; struct scsi_cmnd *cmd; volatile struct mesh_regs __iomem *mr = ms->mesh; for (tgt = 0; tgt < 8; ++tgt) { tp = &ms->tgts[tgt]; if ((cmd = tp->current_req) != NULL) { cmd->result = DID_RESET << 16; tp->current_req = NULL; mesh_completed(ms, cmd); } ms->tgts[tgt].sdtr_state = do_sdtr; ms->tgts[tgt].sync_params = ASYNC_PARAMS; } ms->current_req = NULL; while ((cmd = ms->request_q) != NULL) { ms->request_q = (struct scsi_cmnd *) cmd->host_scribble; cmd->result = DID_RESET << 16; mesh_completed(ms, cmd); } ms->phase = idle; ms->msgphase = msg_none; out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); out_8(&mr->sync_params, ASYNC_PARAMS); out_8(&mr->sequence, SEQ_ENBRESEL); } static irqreturn_t do_mesh_interrupt(int irq, void *dev_id) { unsigned long flags; struct mesh_state *ms = dev_id; struct Scsi_Host *dev = ms->host; spin_lock_irqsave(dev->host_lock, flags); mesh_interrupt(ms); spin_unlock_irqrestore(dev->host_lock, flags); return IRQ_HANDLED; } static void handle_error(struct 
mesh_state *ms) { int err, exc, count; volatile struct mesh_regs __iomem *mr = ms->mesh; err = in_8(&mr->error); exc = in_8(&mr->exception); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); dlog(ms, "error err/exc/fc/cl=%.8x", MKWORD(err, exc, mr->fifo_count, mr->count_lo)); if (err & ERR_SCSIRESET) { /* SCSI bus was reset */ printk(KERN_INFO "mesh: SCSI bus reset detected: " "waiting for end..."); while ((in_8(&mr->bus_status1) & BS1_RST) != 0) udelay(1); printk("done\n"); handle_reset(ms); /* request_q is empty, no point in mesh_start() */ return; } if (err & ERR_UNEXPDISC) { /* Unexpected disconnect */ if (exc & EXC_RESELECTED) { reselected(ms); return; } if (!ms->aborting) { printk(KERN_WARNING "mesh: target %d aborted\n", ms->conn_tgt); dumplog(ms, ms->conn_tgt); dumpslog(ms); } out_8(&mr->interrupt, INT_CMDDONE); ms->stat = DID_ABORT; mesh_done(ms, 1); return; } if (err & ERR_PARITY) { if (ms->msgphase == msg_in) { printk(KERN_ERR "mesh: msg parity error, target %d\n", ms->conn_tgt); ms->msgout[0] = MSG_PARITY_ERROR; ms->n_msgout = 1; ms->msgphase = msg_in_bad; cmd_complete(ms); return; } if (ms->stat == DID_OK) { printk(KERN_ERR "mesh: parity error, target %d\n", ms->conn_tgt); ms->stat = DID_PARITY; } count = (mr->count_hi << 8) + mr->count_lo; if (count == 0) { cmd_complete(ms); } else { /* reissue the data transfer command */ out_8(&mr->sequence, mr->sequence); } return; } if (err & ERR_SEQERR) { if (exc & EXC_RESELECTED) { /* This can happen if we issue a command to get the bus just after the target reselects us. 
*/ static int mesh_resel_seqerr; mesh_resel_seqerr++; reselected(ms); return; } if (exc == EXC_PHASEMM) { static int mesh_phasemm_seqerr; mesh_phasemm_seqerr++; phase_mismatch(ms); return; } printk(KERN_ERR "mesh: sequence error (err=%x exc=%x)\n", err, exc); } else { printk(KERN_ERR "mesh: unknown error %x (exc=%x)\n", err, exc); } mesh_dump_regs(ms); dumplog(ms, ms->conn_tgt); if (ms->phase > selecting && (in_8(&mr->bus_status1) & BS1_BSY)) { /* try to do what the target wants */ do_abort(ms); phase_mismatch(ms); return; } ms->stat = DID_ERROR; mesh_done(ms, 1); } static void handle_exception(struct mesh_state *ms) { int exc; volatile struct mesh_regs __iomem *mr = ms->mesh; exc = in_8(&mr->exception); out_8(&mr->interrupt, INT_EXCEPTION | INT_CMDDONE); if (exc & EXC_RESELECTED) { static int mesh_resel_exc; mesh_resel_exc++; reselected(ms); } else if (exc == EXC_ARBLOST) { printk(KERN_DEBUG "mesh: lost arbitration\n"); ms->stat = DID_BUS_BUSY; mesh_done(ms, 1); } else if (exc == EXC_SELTO) { /* selection timed out */ ms->stat = DID_BAD_TARGET; mesh_done(ms, 1); } else if (exc == EXC_PHASEMM) { /* target wants to do something different: find out what it wants and do it. 
*/ phase_mismatch(ms); } else { printk(KERN_ERR "mesh: can't cope with exception %x\n", exc); mesh_dump_regs(ms); dumplog(ms, ms->conn_tgt); do_abort(ms); phase_mismatch(ms); } } static void handle_msgin(struct mesh_state *ms) { int i, code; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; if (ms->n_msgin == 0) return; code = ms->msgin[0]; if (ALLOW_DEBUG(ms->conn_tgt)) { printk(KERN_DEBUG "got %d message bytes:", ms->n_msgin); for (i = 0; i < ms->n_msgin; ++i) printk(" %x", ms->msgin[i]); printk("\n"); } dlog(ms, "msgin msg=%.8x", MKWORD(ms->n_msgin, code, ms->msgin[1], ms->msgin[2])); ms->expect_reply = 0; ms->n_msgout = 0; if (ms->n_msgin < msgin_length(ms)) goto reject; if (cmd) cmd->SCp.Message = code; switch (code) { case COMMAND_COMPLETE: break; case EXTENDED_MESSAGE: switch (ms->msgin[2]) { case EXTENDED_MODIFY_DATA_POINTER: ms->data_ptr += (ms->msgin[3] << 24) + ms->msgin[6] + (ms->msgin[4] << 16) + (ms->msgin[5] << 8); break; case EXTENDED_SDTR: if (tp->sdtr_state != sdtr_sent) { /* reply with an SDTR */ add_sdtr_msg(ms); /* limit period to at least his value, offset to no more than his */ if (ms->msgout[3] < ms->msgin[3]) ms->msgout[3] = ms->msgin[3]; if (ms->msgout[4] > ms->msgin[4]) ms->msgout[4] = ms->msgin[4]; set_sdtr(ms, ms->msgout[3], ms->msgout[4]); ms->msgphase = msg_out; } else { set_sdtr(ms, ms->msgin[3], ms->msgin[4]); } break; default: goto reject; } break; case SAVE_POINTERS: tp->saved_ptr = ms->data_ptr; break; case RESTORE_POINTERS: ms->data_ptr = tp->saved_ptr; break; case DISCONNECT: ms->phase = disconnecting; break; case ABORT: break; case MESSAGE_REJECT: if (tp->sdtr_state == sdtr_sent) set_sdtr(ms, 0, 0); break; case NOP: break; default: if (IDENTIFY_BASE <= code && code <= IDENTIFY_BASE + 7) { if (cmd == NULL) { do_abort(ms); ms->msgphase = msg_out; } else if (code != cmd->device->lun + IDENTIFY_BASE) { printk(KERN_WARNING "mesh: lun mismatch " "(%d != %d) on reselection from " "target 
%d\n", code - IDENTIFY_BASE, cmd->device->lun, ms->conn_tgt); } break; } goto reject; } return; reject: printk(KERN_WARNING "mesh: rejecting message from target %d:", ms->conn_tgt); for (i = 0; i < ms->n_msgin; ++i) printk(" %x", ms->msgin[i]); printk("\n"); ms->msgout[0] = MESSAGE_REJECT; ms->n_msgout = 1; ms->msgphase = msg_out; } /* * Set up DMA commands for transferring data. */ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd) { int i, dma_cmd, total, off, dtot; struct scatterlist *scl; struct dbdma_cmd *dcmds; dma_cmd = ms->tgts[ms->conn_tgt].data_goes_out? OUTPUT_MORE: INPUT_MORE; dcmds = ms->dma_cmds; dtot = 0; if (cmd) { int nseg; cmd->SCp.this_residual = scsi_bufflen(cmd); nseg = scsi_dma_map(cmd); BUG_ON(nseg < 0); if (nseg) { total = 0; off = ms->data_ptr; scsi_for_each_sg(cmd, scl, nseg, i) { u32 dma_addr = sg_dma_address(scl); u32 dma_len = sg_dma_len(scl); total += scl->length; if (off >= dma_len) { off -= dma_len; continue; } if (dma_len > 0xffff) panic("mesh: scatterlist element >= 64k"); st_le16(&dcmds->req_count, dma_len - off); st_le16(&dcmds->command, dma_cmd); st_le32(&dcmds->phy_addr, dma_addr + off); dcmds->xfer_status = 0; ++dcmds; dtot += dma_len - off; off = 0; } } } if (dtot == 0) { /* Either the target has overrun our buffer, or the caller didn't provide a buffer. 
*/ static char mesh_extra_buf[64]; dtot = sizeof(mesh_extra_buf); st_le16(&dcmds->req_count, dtot); st_le32(&dcmds->phy_addr, virt_to_phys(mesh_extra_buf)); dcmds->xfer_status = 0; ++dcmds; } dma_cmd += OUTPUT_LAST - OUTPUT_MORE; st_le16(&dcmds[-1].command, dma_cmd); memset(dcmds, 0, sizeof(*dcmds)); st_le16(&dcmds->command, DBDMA_STOP); ms->dma_count = dtot; } static void halt_dma(struct mesh_state *ms) { volatile struct dbdma_regs __iomem *md = ms->dma; volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd = ms->current_req; int t, nb; if (!ms->tgts[ms->conn_tgt].data_goes_out) { /* wait a little while until the fifo drains */ t = 50; while (t > 0 && in_8(&mr->fifo_count) != 0 && (in_le32(&md->status) & ACTIVE) != 0) { --t; udelay(1); } } out_le32(&md->control, RUN << 16); /* turn off RUN bit */ nb = (mr->count_hi << 8) + mr->count_lo; dlog(ms, "halt_dma fc/count=%.6x", MKWORD(0, mr->fifo_count, 0, nb)); if (ms->tgts[ms->conn_tgt].data_goes_out) nb += mr->fifo_count; /* nb is the number of bytes not yet transferred to/from the target. 
*/ ms->data_ptr -= nb; dlog(ms, "data_ptr %x", ms->data_ptr); if (ms->data_ptr < 0) { printk(KERN_ERR "mesh: halt_dma: data_ptr=%d (nb=%d, ms=%p)\n", ms->data_ptr, nb, ms); ms->data_ptr = 0; #ifdef MESH_DBG dumplog(ms, ms->conn_tgt); dumpslog(ms); #endif /* MESH_DBG */ } else if (cmd && scsi_bufflen(cmd) && ms->data_ptr > scsi_bufflen(cmd)) { printk(KERN_DEBUG "mesh: target %d overrun, " "data_ptr=%x total=%x goes_out=%d\n", ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), ms->tgts[ms->conn_tgt].data_goes_out); } scsi_dma_unmap(cmd); ms->dma_started = 0; } static void phase_mismatch(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int phase; dlog(ms, "phasemm ch/cl/seq/fc=%.8x", MKWORD(mr->count_hi, mr->count_lo, mr->sequence, mr->fifo_count)); phase = in_8(&mr->bus_status0) & BS0_PHASE; if (ms->msgphase == msg_out_xxx && phase == BP_MSGOUT) { /* output the last byte of the message, without ATN */ out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg); mesh_flush_io(mr); udelay(1); out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]); ms->msgphase = msg_out_last; return; } if (ms->msgphase == msg_in) { get_msgin(ms); if (ms->n_msgin) handle_msgin(ms); } if (ms->dma_started) halt_dma(ms); if (mr->fifo_count) { out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); } ms->msgphase = msg_none; switch (phase) { case BP_DATAIN: ms->tgts[ms->conn_tgt].data_goes_out = 0; ms->phase = dataing; break; case BP_DATAOUT: ms->tgts[ms->conn_tgt].data_goes_out = 1; ms->phase = dataing; break; case BP_COMMAND: ms->phase = commanding; break; case BP_STATUS: ms->phase = statusing; break; case BP_MSGIN: ms->msgphase = msg_in; ms->n_msgin = 0; break; case BP_MSGOUT: ms->msgphase = msg_out; if (ms->n_msgout == 0) { if (ms->aborting) { do_abort(ms); } else { if (ms->last_n_msgout == 0) { printk(KERN_DEBUG "mesh: no msg to repeat\n"); ms->msgout[0] = NOP; ms->last_n_msgout = 1; } ms->n_msgout = ms->last_n_msgout; } } break; default: 
printk(KERN_DEBUG "mesh: unknown scsi phase %x\n", phase); ms->stat = DID_ERROR; mesh_done(ms, 1); return; } start_phase(ms); } static void cmd_complete(struct mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; struct scsi_cmnd *cmd = ms->current_req; struct mesh_target *tp = &ms->tgts[ms->conn_tgt]; int seq, n, t; dlog(ms, "cmd_complete fc=%x", mr->fifo_count); seq = use_active_neg + (ms->n_msgout? SEQ_ATN: 0); switch (ms->msgphase) { case msg_out_xxx: /* huh? we expected a phase mismatch */ ms->n_msgin = 0; ms->msgphase = msg_in; /* fall through */ case msg_in: /* should have some message bytes in fifo */ get_msgin(ms); n = msgin_length(ms); if (ms->n_msgin < n) { out_8(&mr->count_lo, n - ms->n_msgin); out_8(&mr->sequence, SEQ_MSGIN + seq); } else { ms->msgphase = msg_none; handle_msgin(ms); start_phase(ms); } break; case msg_in_bad: out_8(&mr->sequence, SEQ_FLUSHFIFO); mesh_flush_io(mr); udelay(1); out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGIN + SEQ_ATN + use_active_neg); break; case msg_out: /* * To get the right timing on ATN wrt ACK, we have * to get the MESH to drop ACK, wait until REQ gets * asserted, then drop ATN. To do this we first * issue a SEQ_MSGOUT with ATN and wait for REQ, * then change the command to a SEQ_MSGOUT w/o ATN. * If we don't see REQ in a reasonable time, we * change the command to SEQ_MSGIN with ATN, * wait for the phase mismatch interrupt, then * issue the SEQ_MSGOUT without ATN. 
*/ out_8(&mr->count_lo, 1); out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg + SEQ_ATN); t = 30; /* wait up to 30us */ while ((in_8(&mr->bus_status0) & BS0_REQ) == 0 && --t >= 0) udelay(1); dlog(ms, "last_mbyte err/exc/fc/cl=%.8x", MKWORD(mr->error, mr->exception, mr->fifo_count, mr->count_lo)); if (in_8(&mr->interrupt) & (INT_ERROR | INT_EXCEPTION)) { /* whoops, target didn't do what we expected */ ms->last_n_msgout = ms->n_msgout; ms->n_msgout = 0; if (in_8(&mr->interrupt) & INT_ERROR) { printk(KERN_ERR "mesh: error %x in msg_out\n", in_8(&mr->error)); handle_error(ms); return; } if (in_8(&mr->exception) != EXC_PHASEMM) printk(KERN_ERR "mesh: exc %x in msg_out\n", in_8(&mr->exception)); else printk(KERN_DEBUG "mesh: bs0=%x in msg_out\n", in_8(&mr->bus_status0)); handle_exception(ms); return; } if (in_8(&mr->bus_status0) & BS0_REQ) { out_8(&mr->sequence, SEQ_MSGOUT + use_active_neg); mesh_flush_io(mr); udelay(1); out_8(&mr->fifo, ms->msgout[ms->n_msgout-1]); ms->msgphase = msg_out_last; } else { out_8(&mr->sequence, SEQ_MSGIN + use_active_neg + SEQ_ATN); ms->msgphase = msg_out_xxx; } break; case msg_out_last: ms->last_n_msgout = ms->n_msgout; ms->n_msgout = 0; ms->msgphase = ms->expect_reply? msg_in: msg_none; start_phase(ms); break; case msg_none: switch (ms->phase) { case idle: printk(KERN_ERR "mesh: interrupt in idle phase?\n"); dumpslog(ms); return; case selecting: dlog(ms, "Selecting phase at command completion",0); ms->msgout[0] = IDENTIFY(ALLOW_RESEL(ms->conn_tgt), (cmd? cmd->device->lun: 0)); ms->n_msgout = 1; ms->expect_reply = 0; if (ms->aborting) { ms->msgout[0] = ABORT; ms->n_msgout++; } else if (tp->sdtr_state == do_sdtr) { /* add SDTR message */ add_sdtr_msg(ms); ms->expect_reply = 1; tp->sdtr_state = sdtr_sent; } ms->msgphase = msg_out; /* * We need to wait for REQ before dropping ATN. 
* We wait for at most 30us, then fall back to * a scheme where we issue a SEQ_COMMAND with ATN, * which will give us a phase mismatch interrupt * when REQ does come, and then we send the message. */ t = 230; /* wait up to 230us */ while ((in_8(&mr->bus_status0) & BS0_REQ) == 0) { if (--t < 0) { dlog(ms, "impatient for req", ms->n_msgout); ms->msgphase = msg_none; break; } udelay(1); } break; case dataing: if (ms->dma_count != 0) { start_phase(ms); return; } /* * We can get a phase mismatch here if the target * changes to the status phase, even though we have * had a command complete interrupt. Then, if we * issue the SEQ_STATUS command, we'll get a sequence * error interrupt. Which isn't so bad except that * occasionally the mesh actually executes the * SEQ_STATUS *as well as* giving us the sequence * error and phase mismatch exception. */ out_8(&mr->sequence, 0); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); halt_dma(ms); break; case statusing: if (cmd) { cmd->SCp.Status = mr->fifo; if (DEBUG_TARGET(cmd)) printk(KERN_DEBUG "mesh: status is %x\n", cmd->SCp.Status); } ms->msgphase = msg_in; break; case busfreeing: mesh_done(ms, 1); return; case disconnecting: ms->current_req = NULL; ms->phase = idle; mesh_start(ms); return; default: break; } ++ms->phase; start_phase(ms); break; } } /* * Called by midlayer with host locked to queue a new * request */ static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct mesh_state *ms; cmd->scsi_done = done; cmd->host_scribble = NULL; ms = (struct mesh_state *) cmd->device->host->hostdata; if (ms->request_q == NULL) ms->request_q = cmd; else ms->request_qtail->host_scribble = (void *) cmd; ms->request_qtail = cmd; if (ms->phase == idle) mesh_start(ms); return 0; } static DEF_SCSI_QCMD(mesh_queue) /* * Called to handle interrupts, either call by the interrupt * handler (do_mesh_interrupt) or by other functions in * exceptional circumstances */ static void mesh_interrupt(struct 
mesh_state *ms) { volatile struct mesh_regs __iomem *mr = ms->mesh; int intr; #if 0 if (ALLOW_DEBUG(ms->conn_tgt)) printk(KERN_DEBUG "mesh_intr, bs0=%x int=%x exc=%x err=%x " "phase=%d msgphase=%d\n", mr->bus_status0, mr->interrupt, mr->exception, mr->error, ms->phase, ms->msgphase); #endif while ((intr = in_8(&mr->interrupt)) != 0) { dlog(ms, "interrupt intr/err/exc/seq=%.8x", MKWORD(intr, mr->error, mr->exception, mr->sequence)); if (intr & INT_ERROR) { handle_error(ms); } else if (intr & INT_EXCEPTION) { handle_exception(ms); } else if (intr & INT_CMDDONE) { out_8(&mr->interrupt, INT_CMDDONE); cmd_complete(ms); } } } /* Todo: here we can at least try to remove the command from the * queue if it isn't connected yet, and for pending command, assert * ATN until the bus gets freed. */ static int mesh_abort(struct scsi_cmnd *cmd) { struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata; printk(KERN_DEBUG "mesh_abort(%p)\n", cmd); mesh_dump_regs(ms); dumplog(ms, cmd->device->id); dumpslog(ms); return FAILED; } /* * Called by the midlayer with the lock held to reset the * SCSI host and bus. 
* The midlayer will wait for devices to come back, we don't need * to do that ourselves */ static int mesh_host_reset(struct scsi_cmnd *cmd) { struct mesh_state *ms = (struct mesh_state *) cmd->device->host->hostdata; volatile struct mesh_regs __iomem *mr = ms->mesh; volatile struct dbdma_regs __iomem *md = ms->dma; unsigned long flags; printk(KERN_DEBUG "mesh_host_reset\n"); spin_lock_irqsave(ms->host->host_lock, flags); /* Reset the controller & dbdma channel */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ out_8(&mr->error, 0xff); /* clear all error bits */ out_8(&mr->sequence, SEQ_RESETMESH); mesh_flush_io(mr); udelay(1); out_8(&mr->intr_mask, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->source_id, ms->host->this_id); out_8(&mr->sel_timeout, 25); /* 250ms */ out_8(&mr->sync_params, ASYNC_PARAMS); /* Reset the bus */ out_8(&mr->bus_status1, BS1_RST); /* assert RST */ mesh_flush_io(mr); udelay(30); /* leave it on for >= 25us */ out_8(&mr->bus_status1, 0); /* negate RST */ /* Complete pending commands */ handle_reset(ms); spin_unlock_irqrestore(ms->host->host_lock, flags); return SUCCESS; } static void set_mesh_power(struct mesh_state *ms, int state) { if (!machine_is(powermac)) return; if (state) { pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 1); msleep(200); } else { pmac_call_feature(PMAC_FTR_MESH_ENABLE, macio_get_of_node(ms->mdev), 0, 0); msleep(10); } } #ifdef CONFIG_PM static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); unsigned long flags; switch (mesg.event) { case PM_EVENT_SUSPEND: case PM_EVENT_HIBERNATE: case PM_EVENT_FREEZE: break; default: return 0; } if (ms->phase == sleeping) return 0; scsi_block_requests(ms->host); spin_lock_irqsave(ms->host->host_lock, flags); while(ms->phase != idle) { spin_unlock_irqrestore(ms->host->host_lock, flags); 
msleep(10); spin_lock_irqsave(ms->host->host_lock, flags); } ms->phase = sleeping; spin_unlock_irqrestore(ms->host->host_lock, flags); disable_irq(ms->meshintr); set_mesh_power(ms, 0); return 0; } static int mesh_resume(struct macio_dev *mdev) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); unsigned long flags; if (ms->phase != sleeping) return 0; set_mesh_power(ms, 1); mesh_init(ms); spin_lock_irqsave(ms->host->host_lock, flags); mesh_start(ms); spin_unlock_irqrestore(ms->host->host_lock, flags); enable_irq(ms->meshintr); scsi_unblock_requests(ms->host); return 0; } #endif /* CONFIG_PM */ /* * If we leave drives set for synchronous transfers (especially * CDROMs), and reboot to MacOS, it gets confused, poor thing. * So, on reboot we reset the SCSI bus. */ static int mesh_shutdown(struct macio_dev *mdev) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); volatile struct mesh_regs __iomem *mr; unsigned long flags; printk(KERN_INFO "resetting MESH scsi bus(es)\n"); spin_lock_irqsave(ms->host->host_lock, flags); mr = ms->mesh; out_8(&mr->intr_mask, 0); out_8(&mr->interrupt, INT_ERROR | INT_EXCEPTION | INT_CMDDONE); out_8(&mr->bus_status1, BS1_RST); mesh_flush_io(mr); udelay(30); out_8(&mr->bus_status1, 0); spin_unlock_irqrestore(ms->host->host_lock, flags); return 0; } static struct scsi_host_template mesh_template = { .proc_name = "mesh", .name = "MESH", .queuecommand = mesh_queue, .eh_abort_handler = mesh_abort, .eh_host_reset_handler = mesh_host_reset, .can_queue = 20, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .use_clustering = DISABLE_CLUSTERING, }; static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) { struct device_node *mesh = macio_get_of_node(mdev); struct pci_dev* pdev = macio_get_pci_dev(mdev); int tgt, minper; const int *cfp; struct mesh_state *ms; struct Scsi_Host *mesh_host; void *dma_cmd_space; dma_addr_t dma_cmd_bus; switch (mdev->bus->chip->type) { case 
macio_heathrow: case macio_gatwick: case macio_paddington: use_active_neg = 0; break; default: use_active_neg = SEQ_ACTIVE_NEG; } if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) { printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs" " (got %d,%d)\n", macio_resource_count(mdev), macio_irq_count(mdev)); return -ENODEV; } if (macio_request_resources(mdev, "mesh") != 0) { printk(KERN_ERR "mesh: unable to request memory resources"); return -EBUSY; } mesh_host = scsi_host_alloc(&mesh_template, sizeof(struct mesh_state)); if (mesh_host == NULL) { printk(KERN_ERR "mesh: couldn't register host"); goto out_release; } /* Old junk for root discovery, that will die ultimately */ #if !defined(MODULE) note_scsi_host(mesh, mesh_host); #endif mesh_host->base = macio_resource_start(mdev, 0); mesh_host->irq = macio_irq(mdev, 0); ms = (struct mesh_state *) mesh_host->hostdata; macio_set_drvdata(mdev, ms); ms->host = mesh_host; ms->mdev = mdev; ms->pdev = pdev; ms->mesh = ioremap(macio_resource_start(mdev, 0), 0x1000); if (ms->mesh == NULL) { printk(KERN_ERR "mesh: can't map registers\n"); goto out_free; } ms->dma = ioremap(macio_resource_start(mdev, 1), 0x1000); if (ms->dma == NULL) { printk(KERN_ERR "mesh: can't map registers\n"); iounmap(ms->mesh); goto out_free; } ms->meshintr = macio_irq(mdev, 0); ms->dmaintr = macio_irq(mdev, 1); /* Space for dma command list: +1 for stop command, * +1 to allow for aligning. 
*/ ms->dma_cmd_size = (mesh_host->sg_tablesize + 2) * sizeof(struct dbdma_cmd); /* We use the PCI APIs for now until the generic one gets fixed * enough or until we get some macio-specific versions */ dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, &dma_cmd_bus); if (dma_cmd_space == NULL) { printk(KERN_ERR "mesh: can't allocate DMA table\n"); goto out_unmap; } memset(dma_cmd_space, 0, ms->dma_cmd_size); ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space); ms->dma_cmd_space = dma_cmd_space; ms->dma_cmd_bus = dma_cmd_bus + ((unsigned long)ms->dma_cmds) - (unsigned long)dma_cmd_space; ms->current_req = NULL; for (tgt = 0; tgt < 8; ++tgt) { ms->tgts[tgt].sdtr_state = do_sdtr; ms->tgts[tgt].sync_params = ASYNC_PARAMS; ms->tgts[tgt].current_req = NULL; } if ((cfp = of_get_property(mesh, "clock-frequency", NULL))) ms->clk_freq = *cfp; else { printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n"); ms->clk_freq = 50000000; } /* The maximum sync rate is clock / 5; increase * mesh_sync_period if necessary. 
*/ minper = 1000000000 / (ms->clk_freq / 5); /* ns */ if (mesh_sync_period < minper) mesh_sync_period = minper; /* Power up the chip */ set_mesh_power(ms, 1); /* Set it up */ mesh_init(ms); /* Request interrupt */ if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) { printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr); goto out_shutdown; } /* Add scsi host & scan */ if (scsi_add_host(mesh_host, &mdev->ofdev.dev)) goto out_release_irq; scsi_scan_host(mesh_host); return 0; out_release_irq: free_irq(ms->meshintr, ms); out_shutdown: /* shutdown & reset bus in case of error or macos can be confused * at reboot if the bus was set to synchronous mode already */ mesh_shutdown(mdev); set_mesh_power(ms, 0); pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, ms->dma_cmd_space, ms->dma_cmd_bus); out_unmap: iounmap(ms->dma); iounmap(ms->mesh); out_free: scsi_host_put(mesh_host); out_release: macio_release_resources(mdev); return -ENODEV; } static int mesh_remove(struct macio_dev *mdev) { struct mesh_state *ms = (struct mesh_state *)macio_get_drvdata(mdev); struct Scsi_Host *mesh_host = ms->host; scsi_remove_host(mesh_host); free_irq(ms->meshintr, ms); /* Reset scsi bus */ mesh_shutdown(mdev); /* Shut down chip & termination */ set_mesh_power(ms, 0); /* Unmap registers & dma controller */ iounmap(ms->mesh); iounmap(ms->dma); /* Free DMA commands memory */ pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, ms->dma_cmd_space, ms->dma_cmd_bus); /* Release memory resources */ macio_release_resources(mdev); scsi_host_put(mesh_host); return 0; } static struct of_device_id mesh_match[] = { { .name = "mesh", }, { .type = "scsi", .compatible = "chrp,mesh0" }, {}, }; MODULE_DEVICE_TABLE (of, mesh_match); static struct macio_driver mesh_driver = { .driver = { .name = "mesh", .owner = THIS_MODULE, .of_match_table = mesh_match, }, .probe = mesh_probe, .remove = mesh_remove, .shutdown = mesh_shutdown, #ifdef CONFIG_PM .suspend = mesh_suspend, .resume 
= mesh_resume, #endif }; static int __init init_mesh(void) { /* Calculate sync rate from module parameters */ if (sync_rate > 10) sync_rate = 10; if (sync_rate > 0) { printk(KERN_INFO "mesh: configured for synchronous %d MB/s\n", sync_rate); mesh_sync_period = 1000 / sync_rate; /* ns */ mesh_sync_offset = 15; } else printk(KERN_INFO "mesh: configured for asynchronous\n"); return macio_register_driver(&mesh_driver); } static void __exit exit_mesh(void) { return macio_unregister_driver(&mesh_driver); } module_init(init_mesh); module_exit(exit_mesh);
gpl-2.0
jiangchao87/m8uhl
arch/arm/mach-imx/mach-cpuimx51sd.c
4829
9095
/* * * Copyright (C) 2010 Eric Bénard <eric@eukrea.com> * * based on board-mx51_babbage.c which is * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com> * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/i2c/tsc2007.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/i2c-gpio.h> #include <linux/spi/spi.h> #include <linux/can/platform/mcp251x.h> #include <mach/eukrea-baseboards.h> #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-mx51.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include "devices-imx51.h" #include "cpu_op-mx51.h" #define USBH1_RST IMX_GPIO_NR(2, 28) #define ETH_RST IMX_GPIO_NR(2, 31) #define TSC2007_IRQGPIO IMX_GPIO_NR(3, 12) #define CAN_IRQGPIO IMX_GPIO_NR(1, 1) #define CAN_RST IMX_GPIO_NR(4, 15) #define CAN_NCS IMX_GPIO_NR(4, 24) #define CAN_RXOBF IMX_GPIO_NR(1, 4) #define CAN_RX1BF IMX_GPIO_NR(1, 6) #define CAN_TXORTS IMX_GPIO_NR(1, 7) #define CAN_TX1RTS IMX_GPIO_NR(1, 8) #define CAN_TX2RTS IMX_GPIO_NR(1, 9) #define I2C_SCL IMX_GPIO_NR(4, 16) #define I2C_SDA IMX_GPIO_NR(4, 17) /* USB_CTRL_1 */ #define MX51_USB_CTRL_1_OFFSET 0x10 #define MX51_USB_CTRL_UH1_EXT_CLK_EN (1 << 25) #define MX51_USB_PLLDIV_12_MHZ 0x00 #define MX51_USB_PLL_DIV_19_2_MHZ 0x01 #define MX51_USB_PLL_DIV_24_MHZ 0x02 static iomux_v3_cfg_t eukrea_cpuimx51sd_pads[] = { /* UART1 */ MX51_PAD_UART1_RXD__UART1_RXD, MX51_PAD_UART1_TXD__UART1_TXD, MX51_PAD_UART1_RTS__UART1_RTS, MX51_PAD_UART1_CTS__UART1_CTS, /* USB HOST1 */ 
MX51_PAD_USBH1_CLK__USBH1_CLK, MX51_PAD_USBH1_DIR__USBH1_DIR, MX51_PAD_USBH1_NXT__USBH1_NXT, MX51_PAD_USBH1_DATA0__USBH1_DATA0, MX51_PAD_USBH1_DATA1__USBH1_DATA1, MX51_PAD_USBH1_DATA2__USBH1_DATA2, MX51_PAD_USBH1_DATA3__USBH1_DATA3, MX51_PAD_USBH1_DATA4__USBH1_DATA4, MX51_PAD_USBH1_DATA5__USBH1_DATA5, MX51_PAD_USBH1_DATA6__USBH1_DATA6, MX51_PAD_USBH1_DATA7__USBH1_DATA7, MX51_PAD_USBH1_STP__USBH1_STP, MX51_PAD_EIM_CS3__GPIO2_28, /* PHY nRESET */ /* FEC */ MX51_PAD_EIM_DTACK__GPIO2_31, /* PHY nRESET */ /* HSI2C */ MX51_PAD_I2C1_CLK__GPIO4_16, MX51_PAD_I2C1_DAT__GPIO4_17, /* CAN */ MX51_PAD_CSPI1_MOSI__ECSPI1_MOSI, MX51_PAD_CSPI1_MISO__ECSPI1_MISO, MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK, MX51_PAD_CSPI1_SS0__GPIO4_24, /* nCS */ MX51_PAD_CSI2_PIXCLK__GPIO4_15, /* nReset */ MX51_PAD_GPIO1_1__GPIO1_1, /* IRQ */ MX51_PAD_GPIO1_4__GPIO1_4, /* Control signals */ MX51_PAD_GPIO1_6__GPIO1_6, MX51_PAD_GPIO1_7__GPIO1_7, MX51_PAD_GPIO1_8__GPIO1_8, MX51_PAD_GPIO1_9__GPIO1_9, /* Touchscreen */ /* IRQ */ NEW_PAD_CTRL(MX51_PAD_GPIO_NAND__GPIO_NAND, PAD_CTL_PUS_22K_UP | PAD_CTL_PKE | PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUE | PAD_CTL_HYS), }; static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static struct tsc2007_platform_data tsc2007_info = { .model = 2007, .x_plate_ohms = 180, }; static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = { { I2C_BOARD_INFO("pcf8563", 0x51), }, { I2C_BOARD_INFO("tsc2007", 0x49), .type = "tsc2007", .platform_data = &tsc2007_info, .irq = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO), }, }; static const struct mxc_nand_platform_data eukrea_cpuimx51sd_nand_board_info __initconst = { .width = 1, .hw_ecc = 1, .flash_bbt = 1, }; /* This function is board specific as the bit mask for the plldiv will also be different for other Freescale SoCs, thus a common bitmask is not possible and cannot get place in /plat-mxc/ehci.c.*/ static int initialize_otg_port(struct platform_device *pdev) { u32 v; void __iomem 
*usb_base; void __iomem *usbother_base; usb_base = ioremap(MX51_USB_OTG_BASE_ADDR, SZ_4K); if (!usb_base) return -ENOMEM; usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET; /* Set the PHY clock to 19.2MHz */ v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET); v &= ~MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK; v |= MX51_USB_PLL_DIV_19_2_MHZ; __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET); iounmap(usb_base); mdelay(10); return mx51_initialize_usb_hw(0, MXC_EHCI_INTERNAL_PHY); } static int initialize_usbh1_port(struct platform_device *pdev) { u32 v; void __iomem *usb_base; void __iomem *usbother_base; usb_base = ioremap(MX51_USB_OTG_BASE_ADDR, SZ_4K); if (!usb_base) return -ENOMEM; usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET; /* The clock for the USBH1 ULPI port will come from the PHY. */ v = __raw_readl(usbother_base + MX51_USB_CTRL_1_OFFSET); __raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN, usbother_base + MX51_USB_CTRL_1_OFFSET); iounmap(usb_base); mdelay(10); return mx51_initialize_usb_hw(1, MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_ITC_NO_THRESHOLD); } static const struct mxc_usbh_platform_data dr_utmi_config __initconst = { .init = initialize_otg_port, .portsc = MXC_EHCI_UTMI_16BIT, }; static const struct fsl_usb2_platform_data usb_pdata __initconst = { .operating_mode = FSL_USB2_DR_DEVICE, .phy_mode = FSL_USB2_PHY_UTMI_WIDE, }; static const struct mxc_usbh_platform_data usbh1_config __initconst = { .init = initialize_usbh1_port, .portsc = MXC_EHCI_MODE_ULPI, }; static int otg_mode_host; static int __init eukrea_cpuimx51sd_otg_mode(char *options) { if (!strcmp(options, "host")) otg_mode_host = 1; else if (!strcmp(options, "device")) otg_mode_host = 0; else pr_info("otg_mode neither \"host\" nor \"device\". 
" "Defaulting to device\n"); return 0; } __setup("otg_mode=", eukrea_cpuimx51sd_otg_mode); static struct i2c_gpio_platform_data pdata = { .sda_pin = I2C_SDA, .sda_is_open_drain = 0, .scl_pin = I2C_SCL, .scl_is_open_drain = 0, .udelay = 2, }; static struct platform_device hsi2c_gpio_device = { .name = "i2c-gpio", .id = 0, .dev.platform_data = &pdata, }; static struct mcp251x_platform_data mcp251x_info = { .oscillator_frequency = 24E6, }; static struct spi_board_info cpuimx51sd_spi_device[] = { { .modalias = "mcp2515", .max_speed_hz = 10000000, .bus_num = 0, .mode = SPI_MODE_0, .chip_select = 0, .platform_data = &mcp251x_info, .irq = IMX_GPIO_TO_IRQ(CAN_IRQGPIO) }, }; static int cpuimx51sd_spi1_cs[] = { CAN_NCS, }; static const struct spi_imx_master cpuimx51sd_ecspi1_pdata __initconst = { .chipselect = cpuimx51sd_spi1_cs, .num_chipselect = ARRAY_SIZE(cpuimx51sd_spi1_cs), }; static struct platform_device *platform_devices[] __initdata = { &hsi2c_gpio_device, }; static void __init eukrea_cpuimx51sd_init(void) { imx51_soc_init(); mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads, ARRAY_SIZE(eukrea_cpuimx51sd_pads)); #if defined(CONFIG_CPU_FREQ_IMX) get_cpu_op = mx51_get_cpu_op; #endif imx51_add_imx_uart(0, &uart_pdata); imx51_add_mxc_nand(&eukrea_cpuimx51sd_nand_board_info); gpio_request(ETH_RST, "eth_rst"); gpio_set_value(ETH_RST, 1); imx51_add_fec(NULL); gpio_request(CAN_IRQGPIO, "can_irq"); gpio_direction_input(CAN_IRQGPIO); gpio_free(CAN_IRQGPIO); gpio_request(CAN_NCS, "can_ncs"); gpio_direction_output(CAN_NCS, 1); gpio_free(CAN_NCS); gpio_request(CAN_RST, "can_rst"); gpio_direction_output(CAN_RST, 0); msleep(20); gpio_set_value(CAN_RST, 1); imx51_add_ecspi(0, &cpuimx51sd_ecspi1_pdata); spi_register_board_info(cpuimx51sd_spi_device, ARRAY_SIZE(cpuimx51sd_spi_device)); gpio_request(TSC2007_IRQGPIO, "tsc2007_irq"); gpio_direction_input(TSC2007_IRQGPIO); gpio_free(TSC2007_IRQGPIO); i2c_register_board_info(0, eukrea_cpuimx51sd_i2c_devices, 
ARRAY_SIZE(eukrea_cpuimx51sd_i2c_devices)); platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); if (otg_mode_host) imx51_add_mxc_ehci_otg(&dr_utmi_config); else { initialize_otg_port(NULL); imx51_add_fsl_usb2_udc(&usb_pdata); } gpio_request(USBH1_RST, "usb_rst"); gpio_direction_output(USBH1_RST, 0); msleep(20); gpio_set_value(USBH1_RST, 1); imx51_add_mxc_ehci_hs(1, &usbh1_config); #ifdef CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD eukrea_mbimxsd51_baseboard_init(); #endif } static void __init eukrea_cpuimx51sd_timer_init(void) { mx51_clocks_init(32768, 24000000, 22579200, 0); } static struct sys_timer mxc_timer = { .init = eukrea_cpuimx51sd_timer_init, }; MACHINE_START(EUKREA_CPUIMX51SD, "Eukrea CPUIMX51SD") /* Maintainer: Eric Bénard <eric@eukrea.com> */ .atag_offset = 0x100, .map_io = mx51_map_io, .init_early = imx51_init_early, .init_irq = mx51_init_irq, .handle_irq = imx51_handle_irq, .timer = &mxc_timer, .init_machine = eukrea_cpuimx51sd_init, .restart = mxc_restart, MACHINE_END
gpl-2.0
davidmueller13/android_kernel_samsung_lt03lte-5
lib/flex_array.c
7133
11135
/* * Flexible array managed in PAGE_SIZE parts * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright IBM Corporation, 2009 * * Author: Dave Hansen <dave@linux.vnet.ibm.com> */ #include <linux/flex_array.h> #include <linux/slab.h> #include <linux/stddef.h> #include <linux/export.h> #include <linux/reciprocal_div.h> struct flex_array_part { char elements[FLEX_ARRAY_PART_SIZE]; }; /* * If a user requests an allocation which is small * enough, we may simply use the space in the * flex_array->parts[] array to store the user * data. */ static inline int elements_fit_in_base(struct flex_array *fa) { int data_size = fa->element_size * fa->total_nr_elements; if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT) return 1; return 0; } /** * flex_array_alloc - allocate a new flexible array * @element_size: the size of individual elements in the array * @total: total number of elements that this should hold * @flags: page allocation flags to use for base array * * Note: all locking must be provided by the caller. * * @total is used to size internal structures. If the user ever * accesses any array indexes >=@total, it will produce errors. 
* * The maximum number of elements is defined as: the number of * elements that can be stored in a page times the number of * page pointers that we can fit in the base structure or (using * integer math): * * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *) * * Here's a table showing example capacities. Note that the maximum * index that the get/put() functions is just nr_objects-1. This * basically means that you get 4MB of storage on 32-bit and 2MB on * 64-bit. * * * Element size | Objects | Objects | * PAGE_SIZE=4k | 32-bit | 64-bit | * ---------------------------------| * 1 bytes | 4177920 | 2088960 | * 2 bytes | 2088960 | 1044480 | * 3 bytes | 1392300 | 696150 | * 4 bytes | 1044480 | 522240 | * 32 bytes | 130560 | 65408 | * 33 bytes | 126480 | 63240 | * 2048 bytes | 2040 | 1020 | * 2049 bytes | 1020 | 510 | * void * | 1044480 | 261120 | * * Since 64-bit pointers are twice the size, we lose half the * capacity in the base structure. Also note that no effort is made * to efficiently pack objects across page boundaries. 
 */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
					gfp_t flags)
{
	struct flex_array *ret;
	int elems_per_part = 0;
	int reciprocal_elems = 0;
	int max_size = 0;

	if (element_size) {
		elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
		/* precompute multiply-based divide for fa_element_to_part_nr() */
		reciprocal_elems = reciprocal_value(elems_per_part);
		max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
	}

	/* max_size will end up 0 if element_size > PAGE_SIZE */
	if (total > max_size)
		return NULL;
	ret = kzalloc(sizeof(struct flex_array), flags);
	if (!ret)
		return NULL;
	ret->element_size = element_size;
	ret->total_nr_elements = total;
	ret->elems_per_part = elems_per_part;
	ret->reciprocal_elems = reciprocal_elems;
	/*
	 * When all elements fit inside the base structure, poison that space
	 * with FLEX_ARRAY_FREE so part_is_free()/flex_array_clear() can tell
	 * used slots from unused ones.  Skipped when the caller asked for
	 * zeroed memory (__GFP_ZERO) instead.
	 */
	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
		memset(&ret->parts[0], FLEX_ARRAY_FREE,
						FLEX_ARRAY_BASE_BYTES_LEFT);
	return ret;
}
EXPORT_SYMBOL(flex_array_alloc);

/* Map an element index to the index of the second-level part holding it. */
static int fa_element_to_part_nr(struct flex_array *fa,
					unsigned int element_nr)
{
	/* reciprocal_divide() == element_nr / fa->elems_per_part, but cheaper */
	return reciprocal_divide(element_nr, fa->reciprocal_elems);
}

/**
 * flex_array_free_parts - just free the second-level pages
 * @fa: the flex array from which to free parts
 *
 * This is to be used in cases where the base 'struct flex_array'
 * has been statically allocated and should not be free.
 */
void flex_array_free_parts(struct flex_array *fa)
{
	int part_nr;

	/* inline storage: there are no separately allocated parts */
	if (elements_fit_in_base(fa))
		return;
	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
		kfree(fa->parts[part_nr]);	/* kfree(NULL) is a no-op */
}
EXPORT_SYMBOL(flex_array_free_parts);

/* Free the second-level parts and then the base structure itself. */
void flex_array_free(struct flex_array *fa)
{
	flex_array_free_parts(fa);
	kfree(fa);
}
EXPORT_SYMBOL(flex_array_free);

/* Byte offset of @element_nr within the part (@part_nr) that contains it. */
static unsigned int index_inside_part(struct flex_array *fa,
					unsigned int element_nr,
					unsigned int part_nr)
{
	unsigned int part_offset;

	part_offset = element_nr - part_nr * fa->elems_per_part;
	return part_offset * fa->element_size;
}

/*
 * Look up part @part_nr, allocating it on demand.  A freshly allocated
 * part is poisoned with FLEX_ARRAY_FREE unless the caller requested
 * zeroed memory via __GFP_ZERO.  Returns NULL on allocation failure.
 */
static struct flex_array_part *
__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
{
	struct flex_array_part *part = fa->parts[part_nr];
	if (!part) {
		part = kmalloc(sizeof(struct flex_array_part), flags);
		if (!part)
			return NULL;
		if (!(flags & __GFP_ZERO))
			memset(part, FLEX_ARRAY_FREE,
				sizeof(struct flex_array_part));
		fa->parts[part_nr] = part;
	}
	return part;
}

/**
 * flex_array_put - copy data into the array at @element_nr
 * @fa:		the flex array to copy data into
 * @element_nr:	index of the position in which to insert
 *		the new element.
 * @src:	address of data to copy into the array
 * @flags:	page allocation flags to use for array expansion
 *
 *
 * Note that this *copies* the contents of @src into
 * the array.  If you are trying to store an array of
 * pointers, make sure to pass in &ptr instead of ptr.
 * You may instead wish to use the flex_array_put_ptr()
 * helper function.
 *
 * Locking must be provided by the caller.
 */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
			gfp_t flags)
{
	int part_nr = 0;
	struct flex_array_part *part;
	void *dst;

	if (element_nr >= fa->total_nr_elements)
		return -ENOSPC;
	/* zero-sized elements: nothing to store, trivially succeed */
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = __fa_get_part(fa, part_nr, flags);
		if (!part)
			return -ENOMEM;
	}
	dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
	memcpy(dst, src, fa->element_size);
	return 0;
}
EXPORT_SYMBOL(flex_array_put);

/**
 * flex_array_clear - clear element in array at @element_nr
 * @fa:		the flex array of the element.
 * @element_nr:	index of the position to clear.
 *
 * Locking must be provided by the caller.
 */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
{
	int part_nr = 0;
	struct flex_array_part *part;
	void *dst;

	if (element_nr >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = fa->parts[part_nr];
		/* never allocated: nothing was ever stored here */
		if (!part)
			return -EINVAL;
	}
	dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
	/* restore the poison pattern so the slot counts as free again */
	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
	return 0;
}
EXPORT_SYMBOL(flex_array_clear);

/**
 * flex_array_prealloc - guarantee that array space exists
 * @fa:		the flex array for which to preallocate parts
 * @start:	index of first array element for which space is allocated
 * @nr_elements: number of elements for which space is allocated
 * @flags:	page allocation flags
 *
 * This will guarantee that no future calls to flex_array_put()
 * will allocate memory.  It can be used if you are expecting to
 * be holding a lock or in some atomic context while writing
 * data into the array.
 *
 * Locking must be provided by the caller.
 */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
			unsigned int nr_elements, gfp_t flags)
{
	int start_part;
	int end_part;
	int part_nr;
	unsigned int end;
	struct flex_array_part *part;

	/* empty request on an empty range is a no-op, not an error */
	if (!start && !nr_elements)
		return 0;
	if (start >= fa->total_nr_elements)
		return -ENOSPC;
	if (!nr_elements)
		return 0;

	end = start + nr_elements - 1;

	if (end >= fa->total_nr_elements)
		return -ENOSPC;
	if (!fa->element_size)
		return 0;
	/* inline storage is already "allocated" */
	if (elements_fit_in_base(fa))
		return 0;
	start_part = fa_element_to_part_nr(fa, start);
	end_part = fa_element_to_part_nr(fa, end);
	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
		part = __fa_get_part(fa, part_nr, flags);
		if (!part)
			return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(flex_array_prealloc);

/**
 * flex_array_get - pull data back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns a pointer to the data at index @element_nr.  Note
 * that this is a copy of the data that was passed in.  If you
 * are using this to store pointers, you'll get back &ptr.  You
 * may instead wish to use the flex_array_get_ptr helper.
 *
 * Locking must be provided by the caller.
 */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
{
	int part_nr = 0;
	struct flex_array_part *part;

	if (!fa->element_size)
		return NULL;
	if (element_nr >= fa->total_nr_elements)
		return NULL;
	if (elements_fit_in_base(fa))
		part = (struct flex_array_part *)&fa->parts[0];
	else {
		part_nr = fa_element_to_part_nr(fa, element_nr);
		part = fa->parts[part_nr];
		if (!part)
			return NULL;
	}
	return &part->elements[index_inside_part(fa, element_nr, part_nr)];
}
EXPORT_SYMBOL(flex_array_get);

/**
 * flex_array_get_ptr - pull a ptr back out of the array
 * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns the pointer placed in the flex array at element_nr using
 * flex_array_put_ptr().  This function should not be called if the
 * element in question was not set using the _put_ptr() helper.
 */
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
	void **tmp;

	tmp = flex_array_get(fa, element_nr);
	if (!tmp)
		return NULL;

	/* the array stored the pointer by value; dereference to recover it */
	return *tmp;
}
EXPORT_SYMBOL(flex_array_get_ptr);

/* A part is free if every byte still carries the FLEX_ARRAY_FREE poison. */
static int part_is_free(struct flex_array_part *part)
{
	int i;

	for (i = 0; i < sizeof(struct flex_array_part); i++)
		if (part->elements[i] != FLEX_ARRAY_FREE)
			return 0;
	return 1;
}

/**
 * flex_array_shrink - free unused second-level pages
 * @fa:		the flex array to shrink
 *
 * Frees all second-level pages that consist solely of unused
 * elements.  Returns the number of pages freed.
 *
 * Locking must be provided by the caller.
 */
int flex_array_shrink(struct flex_array *fa)
{
	struct flex_array_part *part;
	int part_nr;
	int ret = 0;

	if (!fa->total_nr_elements || !fa->element_size)
		return 0;
	if (elements_fit_in_base(fa))
		return ret;
	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
		part = fa->parts[part_nr];
		if (!part)
			continue;
		if (part_is_free(part)) {
			fa->parts[part_nr] = NULL;
			kfree(part);
			ret++;
		}
	}
	return ret;
}
EXPORT_SYMBOL(flex_array_shrink);
gpl-2.0
matianfu/barcelona-3.2.40
drivers/gpu/drm/i915/i915_ioc32.c
9181
7179
/**
 * \file i915_ioc32.c
 *
 * 32-bit ioctl compatibility routines for the i915 DRM.
 *
 * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
 *
 *
 * Copyright (C) Paul Mackerras 2005
 * Copyright (C) Alan Hourihane 2005
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/compat.h>

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"

/* 32-bit layout of drm_i915_batchbuffer_t; userspace pointers travel as u32 */
typedef struct _drm_i915_batchbuffer32 {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* mulitpass with multiple cliprects? */
	u32 cliprects;		/* pointer to userspace cliprects */
} drm_i915_batchbuffer32_t;

/*
 * Rewrite a 32-bit BATCHBUFFER request into the native layout in a
 * compat_alloc_user_space() scratch area, then forward it to drm_ioctl().
 * The u32 cliprects value is widened back into a real user pointer.
 */
static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	drm_i915_batchbuffer32_t batchbuffer32;
	drm_i915_batchbuffer_t __user *batchbuffer;

	if (copy_from_user
	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
		return -EFAULT;

	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
	    || __put_user(batchbuffer32.start, &batchbuffer->start)
	    || __put_user(batchbuffer32.used, &batchbuffer->used)
	    || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
	    || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
	    || __put_user(batchbuffer32.num_cliprects,
			  &batchbuffer->num_cliprects)
	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
			  &batchbuffer->cliprects))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
			 (unsigned long)batchbuffer);
}

/* 32-bit layout of drm_i915_cmdbuffer_t */
typedef struct _drm_i915_cmdbuffer32 {
	u32 buf;		/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* mulitpass with multiple cliprects? */
	u32 cliprects;		/* pointer to userspace cliprects */
} drm_i915_cmdbuffer32_t;

/* Same translation scheme as compat_i915_batchbuffer(), for CMDBUFFER. */
static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	drm_i915_cmdbuffer32_t cmdbuffer32;
	drm_i915_cmdbuffer_t __user *cmdbuffer;

	if (copy_from_user
	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
		return -EFAULT;

	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
			  &cmdbuffer->buf)
	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
			  &cmdbuffer->cliprects))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
			 (unsigned long)cmdbuffer);
}

/* 32-bit layout of drm_i915_irq_emit_t: irq_seq is a user pointer as u32 */
typedef struct drm_i915_irq_emit32 {
	u32 irq_seq;
} drm_i915_irq_emit32_t;

static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_i915_irq_emit32_t req32;
	drm_i915_irq_emit_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
			  &request->irq_seq))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
			 (unsigned long)request);
}

/* 32-bit layout of drm_i915_getparam_t: value is a user pointer as u32 */
typedef struct drm_i915_getparam32 {
	int param;
	u32 value;
} drm_i915_getparam32_t;

static int compat_i915_getparam(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_i915_getparam32_t req32;
	drm_i915_getparam_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
			 (unsigned long)request);
}

/* 32-bit layout of drm_i915_mem_alloc_t */
typedef struct drm_i915_mem_alloc32 {
	int region;
	int alignment;
	int size;
	u32 region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;

static int compat_i915_alloc(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_i915_mem_alloc32_t req32;
	drm_i915_mem_alloc_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.region, &request->region)
	    || __put_user(req32.alignment, &request->alignment)
	    || __put_user(req32.size, &request->size)
	    || __put_user((void __user *)(unsigned long)req32.region_offset,
			  &request->region_offset))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
			 (unsigned long)request);
}

/* driver-private ioctl number -> compat handler; gaps fall through to drm_ioctl */
drm_ioctl_compat_t *i915_compat_ioctls[] = {
	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
	[DRM_I915_GETPARAM] = compat_i915_getparam,
	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
	[DRM_I915_ALLOC] = compat_i915_alloc
};

/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;
	int ret;

	/* core DRM ioctls are translated by the DRM layer itself */
	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];

	if (fn != NULL)
		ret = (*fn) (filp, cmd, arg);
	else
		ret = drm_ioctl(filp, cmd, arg);

	return ret;
}
gpl-2.0
NoelMacwan/Kernel-C6806-KOT49H.S2.2052
arch/score/lib/checksum_copy.c
11997
1488
/*
 * arch/score/lib/csum_partial_copy.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <net/checksum.h>
#include <asm/uaccess.h>

/*
 * Checksum @len bytes at @src, folding into @sum, and copy them to @dst.
 * Kernel-to-kernel variant: no fault handling is needed.
 */
unsigned int csum_partial_copy(const char *src, char *dst,
			       int len, unsigned int sum)
{
	unsigned int csum = csum_partial(src, len, sum);

	memcpy(dst, src, len);
	return csum;
}

/*
 * Copy @len bytes from user space at @src to @dst, then checksum the
 * destination.  On a partial copy the uncopied tail is zero-filled (so
 * the checksum is still computed over a defined buffer) and *err_ptr is
 * set to -EFAULT.
 */
unsigned int csum_partial_copy_from_user(const char *src, char *dst,
					 int len, unsigned int sum,
					 int *err_ptr)
{
	int not_copied = copy_from_user(dst, src, len);

	if (not_copied != 0) {
		memset(dst + len - not_copied, 0, not_copied);
		*err_ptr = -EFAULT;
	}
	return csum_partial(dst, len, sum);
}
gpl-2.0
Mustaavalkosta/toolchain_gcc-4.8
gcc/testsuite/gcc.c-torture/execute/20020508-3.c
222
2236
/*
 * GCC torture test 20020508-3.c: verify that rotate-right/rotate-left
 * idioms (ROR/ROL) produce the same result whether the operand and the
 * shift count are compile-time constants or runtime variables, for
 * char, short, int, long and long long widths.
 *
 * Fix: declare abort()/exit() via <stdlib.h> and give main an explicit
 * return type -- implicit function declarations and implicit int are
 * invalid since C99.
 */
#include <limits.h>
#include <stdlib.h>

#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

/* Full-width rotate of 'a' by 'b' bits (b must be nonzero and < width). */
#define ROR(a,b) (((a) >> (b)) | ((a) << ((sizeof (a) * CHAR_BIT) - (b))))
#define ROL(a,b) (((a) << (b)) | ((a) >> ((sizeof (a) * CHAR_BIT) - (b))))

#define CHAR_VALUE ((char)0xf234)
#define SHORT_VALUE ((short)0xf234)
#define INT_VALUE ((int)0xf234)
#define LONG_VALUE ((long)0xf2345678L)
#define LL_VALUE ((long long)0xf2345678abcdef0LL)

#define SHIFT1 4
#define SHIFT2 ((sizeof (long long) * CHAR_BIT) - SHIFT1)

/* Globals defeat constant folding of the "variable" side of each check. */
char c = CHAR_VALUE;
short s = SHORT_VALUE;
int i = INT_VALUE;
long l = LONG_VALUE;
long long ll = LL_VALUE;
int shift1 = SHIFT1;
int shift2 = SHIFT2;

int
main (void)
{
  if (ROR (c, shift1) != ROR (CHAR_VALUE, SHIFT1))
    abort ();

  if (ROR (c, SHIFT1) != ROR (CHAR_VALUE, SHIFT1))
    abort ();

  if (ROR (s, shift1) != ROR (SHORT_VALUE, SHIFT1))
    abort ();

  if (ROR (s, SHIFT1) != ROR (SHORT_VALUE, SHIFT1))
    abort ();

  if (ROR (i, shift1) != ROR (INT_VALUE, SHIFT1))
    abort ();

  if (ROR (i, SHIFT1) != ROR (INT_VALUE, SHIFT1))
    abort ();

  if (ROR (l, shift1) != ROR (LONG_VALUE, SHIFT1))
    abort ();

  if (ROR (l, SHIFT1) != ROR (LONG_VALUE, SHIFT1))
    abort ();

  if (ROR (ll, shift1) != ROR (LL_VALUE, SHIFT1))
    abort ();

  if (ROR (ll, SHIFT1) != ROR (LL_VALUE, SHIFT1))
    abort ();

  if (ROR (ll, shift2) != ROR (LL_VALUE, SHIFT2))
    abort ();

  if (ROR (ll, SHIFT2) != ROR (LL_VALUE, SHIFT2))
    abort ();

  if (ROL (c, shift1) != ROL (CHAR_VALUE, SHIFT1))
    abort ();

  if (ROL (c, SHIFT1) != ROL (CHAR_VALUE, SHIFT1))
    abort ();

  if (ROL (s, shift1) != ROL (SHORT_VALUE, SHIFT1))
    abort ();

  if (ROL (s, SHIFT1) != ROL (SHORT_VALUE, SHIFT1))
    abort ();

  if (ROL (i, shift1) != ROL (INT_VALUE, SHIFT1))
    abort ();

  if (ROL (i, SHIFT1) != ROL (INT_VALUE, SHIFT1))
    abort ();

  if (ROL (l, shift1) != ROL (LONG_VALUE, SHIFT1))
    abort ();

  if (ROL (l, SHIFT1) != ROL (LONG_VALUE, SHIFT1))
    abort ();

  if (ROL (ll, shift1) != ROL (LL_VALUE, SHIFT1))
    abort ();

  if (ROL (ll, SHIFT1) != ROL (LL_VALUE, SHIFT1))
    abort ();

  if (ROL (ll, shift2) != ROL (LL_VALUE, SHIFT2))
    abort ();

  if (ROL (ll, SHIFT2) != ROL (LL_VALUE, SHIFT2))
    abort ();

  exit (0);
}
gpl-2.0
mk01/linux-fslc
drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
222
12007
/*
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * Some code and ideas taken from drivers/video/omap/ driver
 * by Imre Deak.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "MANAGER"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>

#include <video/omapfb_dss.h>

#include "dss.h"
#include "dss_features.h"

/* sysfs "name": the overlay manager's fixed name */
static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
}

/* sysfs "display" (read): name of the connected display, or "<none>" */
static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
{
	struct omap_dss_device *dssdev = mgr->get_device(mgr);

	return snprintf(buf, PAGE_SIZE, "%s\n", dssdev ?
			dssdev->name : "<none>");
}

/* match callback for omap_dss_find_device(): compare device name to @data */
static int manager_display_match(struct omap_dss_device *dssdev, void *data)
{
	const char *str = data;

	return sysfs_streq(dssdev->name, str);
}

/*
 * sysfs "display" (write): switch this manager to the named display.
 * An empty name disconnects the current display.  Both the old and the
 * new display must be disabled before the switch; on any failure after
 * omap_dss_find_device() the reference is dropped at put_device.
 */
static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
		const char *buf, size_t size)
{
	int r = 0;
	size_t len = size;
	struct omap_dss_device *dssdev = NULL;
	struct omap_dss_device *old_dssdev;

	/* strip a single trailing newline from the sysfs write */
	if (buf[size-1] == '\n')
		--len;

	if (len > 0)
		dssdev = omap_dss_find_device((void *)buf,
			manager_display_match);

	if (len > 0 && dssdev == NULL)
		return -EINVAL;

	if (dssdev) {
		DSSDBG("display %s found\n", dssdev->name);

		if (omapdss_device_is_connected(dssdev)) {
			DSSERR("new display is already connected\n");
			r = -EINVAL;
			goto put_device;
		}

		if (omapdss_device_is_enabled(dssdev)) {
			DSSERR("new display is not disabled\n");
			r = -EINVAL;
			goto put_device;
		}
	}

	old_dssdev = mgr->get_device(mgr);
	if (old_dssdev) {
		if (omapdss_device_is_enabled(old_dssdev)) {
			DSSERR("old display is not disabled\n");
			r = -EINVAL;
			goto put_device;
		}

		old_dssdev->driver->disconnect(old_dssdev);
	}

	if (dssdev) {
		r = dssdev->driver->connect(dssdev);
		if (r) {
			DSSERR("failed to connect new device\n");
			goto put_device;
		}

		/* verify the connect actually landed on this manager */
		old_dssdev = mgr->get_device(mgr);
		if (old_dssdev != dssdev) {
			DSSERR("failed to connect device to this manager\n");
			dssdev->driver->disconnect(dssdev);
			goto put_device;
		}

		r = mgr->apply(mgr);
		if (r) {
			DSSERR("failed to apply dispc config\n");
			goto put_device;
		}
	}

put_device:
	if (dssdev)
		omap_dss_put_device(dssdev);

	return r ? r : size;
}

/* sysfs "default_color": background color shown where no overlay covers */
static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
					  char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
}

static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
					   const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	u32 color;
	int r;

	r = kstrtouint(buf, 0, &color);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	info.default_color = color;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}

/* textual names for enum omap_dss_trans_key_type, indexed by the enum value */
static const char *trans_key_type_str[] = {
	"gfx-destination",
	"video-source",
};

static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
					   char *buf)
{
	enum omap_dss_trans_key_type key_type;
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	key_type = info.trans_key_type;
	BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));

	return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
}

static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
					    const char *buf, size_t size)
{
	enum omap_dss_trans_key_type key_type;
	struct omap_overlay_manager_info info;
	int r;

	/* map the written string back to its enum value */
	for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
			key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
		if (sysfs_streq(buf, trans_key_type_str[key_type]))
			break;
	}

	if (key_type == ARRAY_SIZE(trans_key_type_str))
		return -EINVAL;

	mgr->get_manager_info(mgr, &info);

	info.trans_key_type = key_type;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}

static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
					    char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
}

static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
					     const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	u32 key_value;
	int r;

	r = kstrtouint(buf, 0, &key_value);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	info.trans_key = key_value;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}

static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
					      char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
}

static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
					       const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	bool enable;
	int r;

	r = strtobool(buf, &enable);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	info.trans_enabled = enable;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}

/* only meaningful on hardware with fixed-zorder alpha blending */
static ssize_t manager_alpha_blending_enabled_show(
		struct omap_overlay_manager *mgr, char *buf)
{
	struct omap_overlay_manager_info info;

	if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
		return -ENODEV;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%d\n",
		info.partial_alpha_enabled);
}

static ssize_t manager_alpha_blending_enabled_store(
		struct omap_overlay_manager *mgr,
		const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	bool enable;
	int r;

	if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
		return -ENODEV;

	r = strtobool(buf, &enable);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	info.partial_alpha_enabled = enable;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}

/* CPR = color phase rotation; requires FEAT_CPR hardware support */
static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
		char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
}

static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
		const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	int r;
	bool enable;

	if (!dss_has_feature(FEAT_CPR))
		return -ENODEV;

	r = strtobool(buf, &enable);
	if (r)
		return r;

	mgr->get_manager_info(mgr, &info);

	/* no-op write: skip the set/apply round trip */
	if (info.cpr_enable == enable)
		return size;

	info.cpr_enable = enable;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}

static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
		char *buf)
{
	struct omap_overlay_manager_info info;

	mgr->get_manager_info(mgr, &info);

	return snprintf(buf, PAGE_SIZE,
			"%d %d %d %d %d %d %d %d %d\n",
			info.cpr_coefs.rr,
			info.cpr_coefs.rg,
			info.cpr_coefs.rb,
			info.cpr_coefs.gr,
			info.cpr_coefs.gg,
			info.cpr_coefs.gb,
			info.cpr_coefs.br,
			info.cpr_coefs.bg,
			info.cpr_coefs.bb);
}

/*
 * Parse nine s16 CPR matrix coefficients (row-major RGB->RGB) and apply
 * them.  Each coefficient must be within the hardware range [-512, 511].
 */
static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
		const char *buf, size_t size)
{
	struct omap_overlay_manager_info info;
	struct omap_dss_cpr_coefs coefs;
	int r, i;
	s16 *arr;

	if (!dss_has_feature(FEAT_CPR))
		return -ENODEV;

	if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
				&coefs.rr, &coefs.rg, &coefs.rb,
				&coefs.gr, &coefs.gg, &coefs.gb,
				&coefs.br, &coefs.bg, &coefs.bb) != 9)
		return -EINVAL;

	/* compound literal lets us range-check all nine values in one loop */
	arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
		coefs.gr, coefs.gg, coefs.gb,
		coefs.br, coefs.bg, coefs.bb };

	for (i = 0; i < 9; ++i) {
		if (arr[i] < -512 || arr[i] > 511)
			return -EINVAL;
	}

	mgr->get_manager_info(mgr, &info);

	info.cpr_coefs = coefs;

	r = mgr->set_manager_info(mgr, &info);
	if (r)
		return r;

	r = mgr->apply(mgr);
	if (r)
		return r;

	return size;
}

/* sysfs attribute glue: per-attribute show/store taking the manager itself */
struct manager_attribute {
	struct attribute attr;
	ssize_t (*show)(struct omap_overlay_manager *, char *);
	ssize_t	(*store)(struct omap_overlay_manager *, const char *, size_t);
};

#define MANAGER_ATTR(_name, _mode, _show, _store) \
	struct manager_attribute manager_attr_##_name = \
	__ATTR(_name, _mode, _show, _store)

static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
		manager_display_show, manager_display_store);
static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
		manager_default_color_show, manager_default_color_store);
static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
		manager_trans_key_type_show, manager_trans_key_type_store);
static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
		manager_trans_key_value_show, manager_trans_key_value_store);
static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
		manager_trans_key_enabled_show,
		manager_trans_key_enabled_store);
static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
		manager_alpha_blending_enabled_show,
		manager_alpha_blending_enabled_store);
static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
		manager_cpr_enable_show,
		manager_cpr_enable_store);
static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
		manager_cpr_coef_show,
		manager_cpr_coef_store);

static struct attribute *manager_sysfs_attrs[] = {
	&manager_attr_name.attr,
	&manager_attr_display.attr,
	&manager_attr_default_color.attr,
	&manager_attr_trans_key_type.attr,
	&manager_attr_trans_key_value.attr,
	&manager_attr_trans_key_enabled.attr,
	&manager_attr_alpha_blending_enabled.attr,
	&manager_attr_cpr_enable.attr,
	&manager_attr_cpr_coef.attr,
	NULL
};

/* kobject show dispatcher: recover the manager and attribute, delegate */
static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
				 char *buf)
{
	struct omap_overlay_manager *manager;
	struct manager_attribute *manager_attr;

	manager = container_of(kobj, struct omap_overlay_manager, kobj);
	manager_attr = container_of(attr, struct manager_attribute, attr);

	if (!manager_attr->show)
		return -ENOENT;

	return manager_attr->show(manager, buf);
}

/* kobject store dispatcher, mirror of manager_attr_show() */
static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t size)
{
	struct omap_overlay_manager *manager;
	struct manager_attribute *manager_attr;

	manager = container_of(kobj, struct omap_overlay_manager, kobj);
	manager_attr = container_of(attr, struct manager_attribute, attr);

	if (!manager_attr->store)
		return -ENOENT;

	return manager_attr->store(manager, buf, size);
}

static const struct sysfs_ops manager_sysfs_ops = {
	.show = manager_attr_show,
	.store = manager_attr_store,
};

static struct kobj_type manager_ktype = {
	.sysfs_ops = &manager_sysfs_ops,
	.default_attrs = manager_sysfs_attrs,
};

/* register /sys/devices/.../manager<id> for this overlay manager */
int dss_manager_kobj_init(struct omap_overlay_manager *mgr,
		struct platform_device *pdev)
{
	return kobject_init_and_add(&mgr->kobj, &manager_ktype,
			&pdev->dev.kobj, "manager%d", mgr->id);
}

void dss_manager_kobj_uninit(struct omap_overlay_manager *mgr)
{
	kobject_del(&mgr->kobj);
	kobject_put(&mgr->kobj);

	/* clear the kobject so a later kobj_init starts from pristine state */
	memset(&mgr->kobj, 0, sizeof(mgr->kobj));
}
gpl-2.0
ZubairLK/CI20_linux
arch/mips/loongson/common/pci.c
734
2946
/* * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology * Author: Fuxin Zhang, zhangfx@lemote.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <pci.h> #include <loongson.h> #include <boot_param.h> static struct resource loongson_pci_mem_resource = { .name = "pci memory space", .start = LOONGSON_PCI_MEM_START, .end = LOONGSON_PCI_MEM_END, .flags = IORESOURCE_MEM, }; static struct resource loongson_pci_io_resource = { .name = "pci io space", .start = LOONGSON_PCI_IO_START, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; static struct pci_controller loongson_pci_controller = { .pci_ops = &loongson_pci_ops, .io_resource = &loongson_pci_io_resource, .mem_resource = &loongson_pci_mem_resource, .mem_offset = 0x00000000UL, .io_offset = 0x00000000UL, }; static void __init setup_pcimap(void) { /* * local to PCI mapping for CPU accessing PCI space * CPU address space [256M,448M] is window for accessing pci space * we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M] * * pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0 * [<2G] [384M,448M] [320M,384M] [0M,64M] */ LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 | LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) | LOONGSON_PCIMAP_WIN(1, LOONGSON_PCILO1_BASE) | LOONGSON_PCIMAP_WIN(0, 0); /* * PCI-DMA to local mapping: [2G,2G+256M] -> [0M,256M] */ LOONGSON_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */ /* size: 256M, burst transmission, pre-fetch enable, 64bit */ LOONGSON_PCI_HIT0_SEL_L = 0xc000000cul; LOONGSON_PCI_HIT0_SEL_H = 0xfffffffful; LOONGSON_PCI_HIT1_SEL_L = 0x00000006ul; /* set this BAR as invalid */ LOONGSON_PCI_HIT1_SEL_H = 0x00000000ul; LOONGSON_PCI_HIT2_SEL_L = 0x00000006ul; /* set this BAR as invalid */ LOONGSON_PCI_HIT2_SEL_H = 0x00000000ul; /* 
avoid deadlock of PCI reading/writing lock operation */ LOONGSON_PCI_ISR4C = 0xd2000001ul; /* can not change gnt to break pci transfer when device's gnt not deassert for some broken device */ LOONGSON_PXARB_CFG = 0x00fe0105ul; #ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG /* * set cpu addr window2 to map CPU address space to PCI address space */ LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC, LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE); #endif } static int __init pcibios_init(void) { setup_pcimap(); loongson_pci_controller.io_map_base = mips_io_port_base; #ifdef CONFIG_LEFI_FIRMWARE_INTERFACE loongson_pci_mem_resource.start = loongson_sysconf.pci_mem_start_addr; loongson_pci_mem_resource.end = loongson_sysconf.pci_mem_end_addr; #endif register_pci_controller(&loongson_pci_controller); return 0; } arch_initcall(pcibios_init);
gpl-2.0
xobs/novena-linux
net/netfilter/xt_AUDIT.c
1758
5201
/*
 * Creates audit record for dropped/accepted packets
 *
 * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
 * (C) 2010-2011 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/audit.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_AUDIT.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <net/ipv6.h>
#include <net/ip.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
MODULE_ALIAS("ipt_AUDIT");
MODULE_ALIAS("ip6t_AUDIT");
MODULE_ALIAS("ebt_AUDIT");
MODULE_ALIAS("arpt_AUDIT");

/* Append transport-layer fields for the given protocol to the audit
 * record: source/destination ports for TCP/UDP/UDPLITE, type/code for
 * ICMP/ICMPv6. 'offset' is the byte offset of the transport header in
 * the skb. If the header can't be linearized, logs " truncated=1"
 * instead. Unknown protocols append nothing.
 *
 * NOTE: the order of audit_log_format() calls here and below defines
 * the userspace-visible AUDIT_NETFILTER_PKT record layout - do not
 * reorder fields.
 */
static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb,
			unsigned int proto, unsigned int offset)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE: {
		/* first two 16-bit words of all three headers are
		 * source and destination port */
		const __be16 *pptr;
		__be16 _ports[2];

		pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports);
		if (pptr == NULL) {
			audit_log_format(ab, " truncated=1");
			return;
		}

		audit_log_format(ab, " sport=%hu dport=%hu",
				 ntohs(pptr[0]), ntohs(pptr[1]));
		}
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6: {
		/* first two bytes are icmp type and code */
		const u8 *iptr;
		u8 _ih[2];

		iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih);
		if (iptr == NULL) {
			audit_log_format(ab, " truncated=1");
			return;
		}

		audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu",
				 iptr[0], iptr[1]);
		}
		break;
	}
}

/* Append IPv4 header fields (saddr/daddr/ipid/proto) to the audit
 * record, then delegate transport fields to audit_proto() unless this
 * is a non-first fragment (in which case only " frag=1" is logged,
 * since the transport header lives in the first fragment).
 *
 * NOTE(review): this reads the header at offset 0 while audit_ip6()
 * uses skb_network_offset(skb); presumably offset 0 equals the network
 * header for the hooks that reach here - verify for the bridge path.
 */
static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
{
	struct iphdr _iph;
	const struct iphdr *ih;

	ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
	if (!ih) {
		audit_log_format(ab, " truncated=1");
		return;
	}

	audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu",
		&ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol);

	if (ntohs(ih->frag_off) & IP_OFFSET) {
		audit_log_format(ab, " frag=1");
		return;
	}

	/* ihl is in 32-bit words, so the transport header starts at ihl*4 */
	audit_proto(ab, skb, ih->protocol, ih->ihl * 4);
}

/* Append IPv6 header fields to the audit record. Walks the extension
 * header chain with ipv6_skip_exthdr() to find the real upper-layer
 * protocol and its offset; transport fields are only appended when the
 * walk succeeded (offset != 0).
 */
static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
{
	struct ipv6hdr _ip6h;
	const struct ipv6hdr *ih;
	u8 nexthdr;
	__be16 frag_off;
	int offset;

	ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
	if (!ih) {
		audit_log_format(ab, " truncated=1");
		return;
	}

	nexthdr = ih->nexthdr;
	offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
				  &nexthdr, &frag_off);

	audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
			 &ih->saddr, &ih->daddr, nexthdr);

	if (offset)
		audit_proto(ab, skb, nexthdr, offset);
}

/* Target handler: emit one AUDIT_NETFILTER_PKT record describing the
 * packet, then continue rule traversal (always returns XT_CONTINUE, so
 * the target never affects the packet's fate).
 *
 * Record contents: action/hook/len/inif/outif always; mark if set;
 * MAC addresses and ethertype for ethernet devices; L3/L4 fields per
 * family (for NFPROTO_BRIDGE, dispatched on the frame's ethertype
 * instead); secmark context when CONFIG_NETWORK_SECMARK is enabled.
 * Uses GFP_ATOMIC since targets run in softirq context. Bails out
 * silently when auditing is disabled or the audit buffer can't be
 * allocated.
 */
static unsigned int
audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_audit_info *info = par->targinfo;
	struct audit_buffer *ab;

	if (audit_enabled == 0)
		goto errout;

	ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
	if (ab == NULL)
		goto errout;

	audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s",
			 info->type, par->hooknum, skb->len,
			 par->in ? par->in->name : "?",
			 par->out ? par->out->name : "?");

	if (skb->mark)
		audit_log_format(ab, " mark=%#x", skb->mark);

	if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
		audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x",
				 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
				 ntohs(eth_hdr(skb)->h_proto));

		if (par->family == NFPROTO_BRIDGE) {
			/* case labels on htons() of a constant - a GCC
			 * constant-folding idiom used widely in the kernel */
			switch (eth_hdr(skb)->h_proto) {
			case htons(ETH_P_IP):
				audit_ip4(ab, skb);
				break;

			case htons(ETH_P_IPV6):
				audit_ip6(ab, skb);
				break;
			}
		}
	}

	/* NFPROTO_BRIDGE matches neither case here; its L3 details were
	 * already logged above based on the ethertype */
	switch (par->family) {
	case NFPROTO_IPV4:
		audit_ip4(ab, skb);
		break;

	case NFPROTO_IPV6:
		audit_ip6(ab, skb);
		break;
	}

#ifdef CONFIG_NETWORK_SECMARK
	if (skb->secmark)
		audit_log_secctx(ab, skb->secmark);
#endif

	audit_log_end(ab);

errout:
	return XT_CONTINUE;
}

/* ebtables wrapper: same record, but ebtables expects EBT_CONTINUE
 * rather than XT_CONTINUE as the "keep traversing" verdict. */
static unsigned int
audit_tg_ebt(struct sk_buff *skb, const struct xt_action_param *par)
{
	audit_tg(skb, par);
	return EBT_CONTINUE;
}

/* Rule-load time validation: reject out-of-range audit types. */
static int audit_tg_check(const struct xt_tgchk_param *par)
{
	const struct xt_audit_info *info = par->targinfo;

	if (info->type > XT_AUDIT_TYPE_MAX) {
		pr_info("Audit type out of range (valid range: 0..%hhu)\n",
			XT_AUDIT_TYPE_MAX);
		return -ERANGE;
	}

	return 0;
}

/* Two registrations: one generic (ip/ip6/arp tables) and one for the
 * bridge family, which needs the EBT_CONTINUE-returning wrapper. */
static struct xt_target audit_tg_reg[] __read_mostly = {
	{
		.name		= "AUDIT",
		.family		= NFPROTO_UNSPEC,
		.target		= audit_tg,
		.targetsize	= sizeof(struct xt_audit_info),
		.checkentry	= audit_tg_check,
		.me		= THIS_MODULE,
	},
	{
		.name		= "AUDIT",
		.family		= NFPROTO_BRIDGE,
		.target		= audit_tg_ebt,
		.targetsize	= sizeof(struct xt_audit_info),
		.checkentry	= audit_tg_check,
		.me		= THIS_MODULE,
	},
};

static int __init audit_tg_init(void)
{
	return xt_register_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg));
}

static void __exit audit_tg_exit(void)
{
	xt_unregister_targets(audit_tg_reg, ARRAY_SIZE(audit_tg_reg));
}

module_init(audit_tg_init);
module_exit(audit_tg_exit);
gpl-2.0
CyanogenMod/android_kernel_htc_enrc2b
drivers/scsi/in2000.c
3038
73554
/* * in2000.c - Linux device driver for the * Always IN2000 ISA SCSI card. * * Copyright (c) 1996 John Shifflett, GeoLog Consulting * john@geolog.com * jshiffle@netcom.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. * * Drew Eckhardt's excellent 'Generic NCR5380' sources provided * much of the inspiration and some of the code for this driver. * The Linux IN2000 driver distributed in the Linux kernels through * version 1.2.13 was an extremely valuable reference on the arcane * (and still mysterious) workings of the IN2000's fifo. It also * is where I lifted in2000_biosparam(), the gist of the card * detection scheme, and other bits of code. Many thanks to the * talented and courageous people who wrote, contributed to, and * maintained that driver (including Brad McLean, Shaun Savage, * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey, * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric * Youngdale). I should also mention the driver written by * Hamish Macdonald for the (GASP!) Amiga A2091 card, included * in the Linux-m68k distribution; it gave me a good initial * understanding of the proper way to run a WD33c93 chip, and I * ended up stealing lots of code from it. 
* * _This_ driver is (I feel) an improvement over the old one in * several respects: * - All problems relating to the data size of a SCSI request are * gone (as far as I know). The old driver couldn't handle * swapping to partitions because that involved 4k blocks, nor * could it deal with the st.c tape driver unmodified, because * that usually involved 4k - 32k blocks. The old driver never * quite got away from a morbid dependence on 2k block sizes - * which of course is the size of the card's fifo. * * - Target Disconnection/Reconnection is now supported. Any * system with more than one device active on the SCSI bus * will benefit from this. The driver defaults to what I'm * calling 'adaptive disconnect' - meaning that each command * is evaluated individually as to whether or not it should * be run with the option to disconnect/reselect (if the * device chooses), or as a "SCSI-bus-hog". * * - Synchronous data transfers are now supported. Because there * are a few devices (and many improperly terminated systems) * that choke when doing sync, the default is sync DISABLED * for all devices. This faster protocol can (and should!) * be enabled on selected devices via the command-line. * * - Runtime operating parameters can now be specified through * either the LILO or the 'insmod' command line. For LILO do: * "in2000=blah,blah,blah" * and with insmod go like: * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah" * The defaults should be good for most people. See the comment * for 'setup_strings' below for more details. * * - The old driver relied exclusively on what the Western Digital * docs call "Combination Level 2 Commands", which are a great * idea in that the CPU is relieved of a lot of interrupt * overhead. However, by accepting a certain (user-settable) * amount of additional interrupts, this driver achieves * better control over the SCSI bus, and data transfers are * almost as fast while being much easier to define, track, * and debug. 
* * - You can force detection of a card whose BIOS has been disabled. * * - Multiple IN2000 cards might almost be supported. I've tried to * keep it in mind, but have no way to test... * * * TODO: * tagged queuing. multiple cards. * * * NOTE: * When using this or any other SCSI driver as a module, you'll * find that with the stock kernel, at most _two_ SCSI hard * drives will be linked into the device list (ie, usable). * If your IN2000 card has more than 2 disks on its bus, you * might want to change the define of 'SD_EXTRA_DEVS' in the * 'hosts.h' file from 2 to whatever is appropriate. It took * me a while to track down this surprisingly obscure and * undocumented little "feature". * * * People with bug reports, wish-lists, complaints, comments, * or improvements are asked to pah-leeez email me (John Shifflett) * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get * this thing into as good a shape as possible, and I'm positive * there are lots of lurking bugs and "Stupid Places". * * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk> * - Using new_eh handler * - Hopefully got all the locking right again * See "FIXME" notes for items that could do with more work */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/ioport.h> #include <linux/stat.h> #include <asm/io.h> #include <asm/system.h> #include "scsi.h" #include <scsi/scsi_host.h> #define IN2000_VERSION "1.33-2.5" #define IN2000_DATE "2002/11/03" #include "in2000.h" /* * 'setup_strings' is a single string used to pass operating parameters and * settings from the kernel/module command-line to the driver. 'setup_args[]' * is an array of strings that define the compile-time default values for * these settings. If Linux boots with a LILO or insmod command-line, those * settings are combined with 'setup_args[]'. 
Note that LILO command-lines * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix. * The driver recognizes the following keywords (lower case required) and * arguments: * * - ioport:addr -Where addr is IO address of a (usually ROM-less) card. * - noreset -No optional args. Prevents SCSI bus reset at boot time. * - nosync:x -x is a bitmask where the 1st 7 bits correspond with * the 7 possible SCSI devices (bit 0 for device #0, etc). * Set a bit to PREVENT sync negotiation on that device. * The driver default is sync DISABLED on all devices. * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer * period. Default is 500; acceptable values are 250 - 1000. * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them. * x = 1 does 'adaptive' disconnects, which is the default * and generally the best choice. * - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes * various types of debug output to printed - see the DB_xxx * defines in in2000.h * - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that * determines how the /proc interface works and what it * does - see the PR_xxx defines in in2000.h * * Syntax Notes: * - Numeric arguments can be decimal or the '0x' form of hex notation. There * _must_ be a colon between a keyword and its numeric argument, with no * spaces. * - Keywords are separated by commas, no spaces, in the standard kernel * command-line manner. * - A keyword in the 'nth' comma-separated command-line member will overwrite * the 'nth' element of setup_args[]. A blank command-line member (in * other words, a comma with no preceding keyword) will _not_ overwrite * the corresponding setup_args[] element. * * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'): * - in2000=ioport:0x220,noreset * - in2000=period:250,disconnect:2,nosync:0x03 * - in2000=debug:0x1e * - in2000=proc:3 */ /* Normally, no defaults are specified... 
*/

static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };

/* filled in by 'insmod' */
static char *setup_strings;
module_param(setup_strings, charp, 0);

/* Read a WD33c93 register: select reg_num via the address port, then
 * read its value back through the data port. (write1_io()/read1_io()
 * are the byte-wide port accessors declared in in2000.h.) */
static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
{
	write1_io(reg_num, IO_WD_ADDR);
	return read1_io(IO_WD_DATA);
}

/* The auxiliary status register is readable directly - no select needed. */
#define READ_AUX_STAT() read1_io(IO_WD_ASR)

/* Write 'value' into WD33c93 register reg_num (select, then write). */
static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
{
	write1_io(reg_num, IO_WD_ADDR);
	write1_io(value, IO_WD_DATA);
}

/* Issue a command byte to the WD33c93 command register. */
static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
{
/*    while (READ_AUX_STAT() & ASR_CIP)
	 printk("|");*/
	write1_io(WD_COMMAND, IO_WD_ADDR);
	write1_io(cmd, IO_WD_DATA);
}

/* Pull exactly one byte off the SCSI bus in polled mode: start a
 * single-byte TRANSFER_INFO and spin on the auxiliary status register,
 * grabbing the byte when DBR (data buffer ready) is set, until the
 * chip raises its interrupt bit. Returns 0 if no byte arrived. */
static uchar read_1_byte(struct IN2000_hostdata *hostdata)
{
	uchar asr, x = 0;

	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80);
	do {
		asr = READ_AUX_STAT();
		if (asr & ASR_DBR)
			x = read_3393(hostdata, WD_DATA);
	} while (!(asr & ASR_INT));
	return x;
}

/* Load the chip's 24-bit transfer counter (MSB first; the address
 * auto-increments across the three count registers). */
static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
{
	write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
	write1_io((value >> 16), IO_WD_DATA);
	write1_io((value >> 8), IO_WD_DATA);
	write1_io(value, IO_WD_DATA);
}

/* Read back the 24-bit transfer counter (MSB first, auto-increment). */
static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
{
	unsigned long value;

	write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
	value = read1_io(IO_WD_DATA) << 16;
	value |= read1_io(IO_WD_DATA) << 8;
	value |= read1_io(IO_WD_DATA);
	return value;
}

/* The 33c93 needs to be told which direction a command transfers its
 * data; we use this function to figure it out. Returns true if there
 * will be a DATA_OUT phase with this command, false otherwise.
 * (Thanks to Joerg Dorchain for the research and suggestion.)
*/ static int is_dir_out(Scsi_Cmnd * cmd) { switch (cmd->cmnd[0]) { case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER: case WRITE_VERIFY: case WRITE_VERIFY_12: case COMPARE: case COPY: case COPY_VERIFY: case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW: case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12: case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE: case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT: case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK: case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG: case 0xea: return 1; default: return 0; } } static struct sx_period sx_table[] = { {1, 0x20}, {252, 0x20}, {376, 0x30}, {500, 0x40}, {624, 0x50}, {752, 0x60}, {876, 0x70}, {1000, 0x00}, {0, 0} }; static int round_period(unsigned int period) { int x; for (x = 1; sx_table[x].period_ns; x++) { if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) { return x; } } return 7; } static uchar calc_sync_xfer(unsigned int period, unsigned int offset) { uchar result; period *= 4; /* convert SDTR code to ns */ result = sx_table[round_period(period)].reg_value; result |= (offset < OPTIMUM_SX_OFF) ? 
offset : OPTIMUM_SX_OFF; return result; } static void in2000_execute(struct Scsi_Host *instance); static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; Scsi_Cmnd *tmp; instance = cmd->device->host; hostdata = (struct IN2000_hostdata *) instance->hostdata; DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0])) /* Set up a few fields in the Scsi_Cmnd structure for our own use: * - host_scribble is the pointer to the next cmd in the input queue * - scsi_done points to the routine we call when a cmd is finished * - result is what you'd expect */ cmd->host_scribble = NULL; cmd->scsi_done = done; cmd->result = 0; /* We use the Scsi_Pointer structure that's included with each command * as a scratchpad (as it's intended to be used!). The handy thing about * the SCp.xxx fields is that they're always associated with a given * cmd, and are preserved across disconnect-reselect. This means we * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages * if we keep all the critical pointers and counters in SCp: * - SCp.ptr is the pointer into the RAM buffer * - SCp.this_residual is the size of that buffer * - SCp.buffer points to the current scatter-gather buffer * - SCp.buffers_residual tells us how many S.G. 
buffers there are * - SCp.have_data_in helps keep track of >2048 byte transfers * - SCp.sent_command is not used * - SCp.phase records this command's SRCID_ER bit setting */ if (scsi_bufflen(cmd)) { cmd->SCp.buffer = scsi_sglist(cmd); cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; } else { cmd->SCp.buffer = NULL; cmd->SCp.buffers_residual = 0; cmd->SCp.ptr = NULL; cmd->SCp.this_residual = 0; } cmd->SCp.have_data_in = 0; /* We don't set SCp.phase here - that's done in in2000_execute() */ /* WD docs state that at the conclusion of a "LEVEL2" command, the * status byte can be retrieved from the LUN register. Apparently, * this is the case only for *uninterrupted* LEVEL2 commands! If * there are any unexpected phases entered, even if they are 100% * legal (different devices may choose to do things differently), * the LEVEL2 command sequence is exited. This often occurs prior * to receiving the status byte, in which case the driver does a * status phase interrupt and gets the status byte on its own. * While such a command can then be "resumed" (ie restarted to * finish up as a LEVEL2 command), the LUN register will NOT be * a valid status byte at the command's conclusion, and we must * use the byte obtained during the earlier interrupt. Here, we * preset SCp.Status to an illegal value (0xff) so that when * this command finally completes, we can tell where the actual * status byte is stored. */ cmd->SCp.Status = ILLEGAL_STATUS_BYTE; /* We need to disable interrupts before messing with the input * queue and calling in2000_execute(). */ /* * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE * commands are added to the head of the queue so that the desired * sense data is not lost before REQUEST_SENSE executes. 
*/ if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) { cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { /* find the end of the queue */ for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble); tmp->host_scribble = (uchar *) cmd; } /* We know that there's at least one command in 'input_Q' now. * Go see if any of them are runnable! */ in2000_execute(cmd->device->host); DB(DB_QUEUE_COMMAND, printk(")Q ")) return 0; } static DEF_SCSI_QCMD(in2000_queuecommand) /* * This routine attempts to start a scsi command. If the host_card is * already connected, we give up immediately. Otherwise, look through * the input_Q, using the first command we find that's intended * for a currently non-busy target/lun. * Note that this function is always called with interrupts already * disabled (either from in2000_queuecommand() or in2000_intr()). */ static void in2000_execute(struct Scsi_Host *instance) { struct IN2000_hostdata *hostdata; Scsi_Cmnd *cmd, *prev; int i; unsigned short *sp; unsigned short f; unsigned short flushbuf[16]; hostdata = (struct IN2000_hostdata *) instance->hostdata; DB(DB_EXECUTE, printk("EX(")) if (hostdata->selecting || hostdata->connected) { DB(DB_EXECUTE, printk(")EX-0 ")) return; } /* * Search through the input_Q for a command destined * for an idle target/lun. 
*/ cmd = (Scsi_Cmnd *) hostdata->input_Q; prev = NULL; while (cmd) { if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))) break; prev = cmd; cmd = (Scsi_Cmnd *) cmd->host_scribble; } /* quit if queue empty or all possible targets are busy */ if (!cmd) { DB(DB_EXECUTE, printk(")EX-1 ")) return; } /* remove command from queue */ if (prev) prev->host_scribble = cmd->host_scribble; else hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble; #ifdef PROC_STATISTICS hostdata->cmd_cnt[cmd->device->id]++; #endif /* * Start the selection process */ if (is_dir_out(cmd)) write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id); else write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); /* Now we need to figure out whether or not this command is a good * candidate for disconnect/reselect. We guess to the best of our * ability, based on a set of hierarchical rules. When several * devices are operating simultaneously, disconnects are usually * an advantage. In a single device system, or if only 1 device * is being accessed, transfers usually go faster if disconnects * are not allowed: * * + Commands should NEVER disconnect if hostdata->disconnect = * DIS_NEVER (this holds for tape drives also), and ALWAYS * disconnect if hostdata->disconnect = DIS_ALWAYS. * + Tape drive commands should always be allowed to disconnect. * + Disconnect should be allowed if disconnected_Q isn't empty. * + Commands should NOT disconnect if input_Q is empty. * + Disconnect should be allowed if there are commands in input_Q * for a different target/lun. In this case, the other commands * should be made disconnect-able, if not already. * * I know, I know - this code would flunk me out of any * "C Programming 101" class ever offered. But it's easy * to change around and experiment with for now. 
*/ cmd->SCp.phase = 0; /* assume no disconnect */ if (hostdata->disconnect == DIS_NEVER) goto no; if (hostdata->disconnect == DIS_ALWAYS) goto yes; if (cmd->device->type == 1) /* tape drive? */ goto yes; if (hostdata->disconnected_Q) /* other commands disconnected? */ goto yes; if (!(hostdata->input_Q)) /* input_Q empty? */ goto no; for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) { if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) { for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) prev->SCp.phase = 1; goto yes; } } goto no; yes: cmd->SCp.phase = 1; #ifdef PROC_STATISTICS hostdata->disc_allowed_cnt[cmd->device->id]++; #endif no: write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0)); write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun); write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { /* * Do a 'Select-With-ATN' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * CSR_SELECT: success - proceed. */ hostdata->selecting = cmd; /* Every target has its own synchronous transfer setting, kept in * the sync_xfer array, and a corresponding status byte in sync_stat[]. * Each target's sync_stat[] entry is initialized to SS_UNSET, and its * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET * means that the parameters are undetermined as yet, and that we * need to send an SDTR message to this device after selection is * complete. We set SS_FIRST to tell the interrupt routine to do so, * unless we don't want to even _try_ synchronous transfers: In this * case we set SS_SET to make the defaults final. 
*/ if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) { if (hostdata->sync_off & (1 << cmd->device->id)) hostdata->sync_stat[cmd->device->id] = SS_SET; else hostdata->sync_stat[cmd->device->id] = SS_FIRST; } hostdata->state = S_SELECTING; write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */ write_3393_cmd(hostdata, WD_CMD_SEL_ATN); } else { /* * Do a 'Select-With-ATN-Xfer' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * anything else: success - proceed. */ hostdata->connected = cmd; write_3393(hostdata, WD_COMMAND_PHASE, 0); /* copy command_descriptor_block into WD chip * (take advantage of auto-incrementing) */ write1_io(WD_CDB_1, IO_WD_ADDR); for (i = 0; i < cmd->cmd_len; i++) write1_io(cmd->cmnd[i], IO_WD_DATA); /* The wd33c93 only knows about Group 0, 1, and 5 commands when * it's doing a 'select-and-transfer'. To be safe, we write the * size of the CDB into the OWN_ID register for every case. This * way there won't be problems with vendor-unique, audio, etc. */ write_3393(hostdata, WD_OWN_ID, cmd->cmd_len); /* When doing a non-disconnect command, we can save ourselves a DATA * phase interrupt later by setting everything up now. With writes we * need to pre-fill the fifo; if there's room for the 32 flush bytes, * put them in there too - that'll avoid a fifo interrupt. Reads are * somewhat simpler. * KLUDGE NOTE: It seems that you can't completely fill the fifo here: * This results in the IO_FIFO_COUNT register rolling over to zero, * and apparently the gate array logic sees this as empty, not full, * so the 3393 chip is never signalled to start reading from the * fifo. Or maybe it's seen as a permanent fifo interrupt condition. * Regardless, we fix this by temporarily pretending that the fifo * is 16 bytes smaller. (I see now that the old driver has a comment * about "don't fill completely" in an analogous place - must be the * same deal.) 
This results in CDROM, swap partitions, and tape drives * needing an extra interrupt per write command - I think we can live * with that! */ if (!(cmd->SCp.phase)) { write_3393_count(hostdata, cmd->SCp.this_residual); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS); write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */ if (is_dir_out(cmd)) { hostdata->fifo = FI_FIFO_WRITING; if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16)) i = IN2000_FIFO_SIZE - 16; cmd->SCp.have_data_in = i; /* this much data in fifo */ i >>= 1; /* Gulp. Assuming modulo 2. */ sp = (unsigned short *) cmd->SCp.ptr; f = hostdata->io_base + IO_FIFO; #ifdef FAST_WRITE_IO FAST_WRITE2_IO(); #else while (i--) write2_io(*sp++, IO_FIFO); #endif /* Is there room for the flush bytes? */ if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) { sp = flushbuf; i = 16; #ifdef FAST_WRITE_IO FAST_WRITE2_IO(); #else while (i--) write2_io(0, IO_FIFO); #endif } } else { write1_io(0, IO_FIFO_READ); /* put fifo in read mode */ hostdata->fifo = FI_FIFO_READING; cmd->SCp.have_data_in = 0; /* nothing transferred yet */ } } else { write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */ } hostdata->state = S_RUNNING_LEVEL2; write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); } /* * Since the SCSI bus can handle only 1 connection at a time, * we get out of here now. If the selection fails, or when * the command disconnects, we'll come back to this routine * to search the input_Q again... */ DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : "")) } static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata) { uchar asr; DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? 
"in" : "out")) write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_3393_count(hostdata, cnt); write_3393_cmd(hostdata, WD_CMD_TRANS_INFO); if (data_in_dir) { do { asr = READ_AUX_STAT(); if (asr & ASR_DBR) *buf++ = read_3393(hostdata, WD_DATA); } while (!(asr & ASR_INT)); } else { do { asr = READ_AUX_STAT(); if (asr & ASR_DBR) write_3393(hostdata, WD_DATA, *buf++); } while (!(asr & ASR_INT)); } /* Note: we are returning with the interrupt UN-cleared. * Since (presumably) an entire I/O operation has * completed, the bus phase is probably different, and * the interrupt routine will discover this when it * responds to the uncleared int. */ } static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir) { struct IN2000_hostdata *hostdata; unsigned short *sp; unsigned short f; int i; hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata; /* Normally, you'd expect 'this_residual' to be non-zero here. * In a series of scatter-gather transfers, however, this * routine will usually be called with 'this_residual' equal * to 0 and 'buffers_residual' non-zero. This means that a * previous transfer completed, clearing 'this_residual', and * now we need to setup the next scatter-gather buffer as the * source or destination for THIS transfer. */ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { ++cmd->SCp.buffer; --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); } /* Set up hardware registers */ write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); write_3393_count(hostdata, cmd->SCp.this_residual); write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS); write1_io(0, IO_FIFO_WRITE); /* zero counter, assume write */ /* Reading is easy. Just issue the command and return - we'll * get an interrupt later when we have actual data to worry about. 
*/ if (data_in_dir) { write1_io(0, IO_FIFO_READ); if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) { write_3393(hostdata, WD_COMMAND_PHASE, 0x45); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else write_3393_cmd(hostdata, WD_CMD_TRANS_INFO); hostdata->fifo = FI_FIFO_READING; cmd->SCp.have_data_in = 0; return; } /* Writing is more involved - we'll start the WD chip and write as * much data to the fifo as we can right now. Later interrupts will * write any bytes that don't make it at this stage. */ if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) { write_3393(hostdata, WD_COMMAND_PHASE, 0x45); write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else write_3393_cmd(hostdata, WD_CMD_TRANS_INFO); hostdata->fifo = FI_FIFO_WRITING; sp = (unsigned short *) cmd->SCp.ptr; if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE) i = IN2000_FIFO_SIZE; cmd->SCp.have_data_in = i; i >>= 1; /* Gulp. We assume this_residual is modulo 2 */ f = hostdata->io_base + IO_FIFO; #ifdef FAST_WRITE_IO FAST_WRITE2_IO(); #else while (i--) write2_io(*sp++, IO_FIFO); #endif } /* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this * function in order to work in an SMP environment. (I'd be surprised * if the driver is ever used by anyone on a real multi-CPU motherboard, * but it _does_ need to be able to compile and run in an SMP kernel.) 
 */

/*
 * in2000_intr() - interrupt handler for the IN2000 card.
 *
 * The card has two interrupt sources OR'ed onto one IRQ line: the WD33c93
 * (here called "3393") SCSI chip and the on-board 2k FIFO (a dual-port RAM
 * plus logic array).  We check ASR_INT first: if the 3393 is not asserting
 * an interrupt, the FIFO must be the source.
 *
 * Per Bill Earnest: bit 0 of IO_FIFO_COUNT mirrors the FIFO interrupt state,
 * but the logic is arranged so we never actually need to test it.  The FIFO
 * interrupt is a non-latching signal derived from the 7 high bits of the
 * FIFO counter and the read/write direction; it fires during FIFO->3393
 * movement when ~31 bytes remain, and is only released after we push at
 * least 32 more bytes (two counter ticks) into the FIFO - fewer causes an
 * immediate re-interrupt.  Hence the dummy-byte flushing below.
 *
 * NOTE(review): reading WD_SCSI_STATUS clears the chip interrupt as a side
 * effect, so the order of register reads in this function is significant.
 */
static irqreturn_t in2000_intr(int irqnum, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *patch, *cmd;
	uchar asr, sr, phs, id, lun, *ucp, msg;
	int i, j;
	unsigned long length;
	unsigned short *sp;
	unsigned short f;	/* io port for FAST_READ2_IO() */
	unsigned long flags;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

/* Get the spin_lock and disable further ints, for SMP */

	spin_lock_irqsave(instance->host_lock, flags);

#ifdef PROC_STATISTICS
	hostdata->int_cnt++;
#endif

	write1_io(0, IO_LED_ON);
	asr = READ_AUX_STAT();
	if (!(asr & ASR_INT)) {	/* no WD33c93 interrupt? */

/* Definitely a FIFO-only interrupt.
 *
 * If FI_FIFO_READING, up to 2048 bytes are waiting (maybe more to come from
 * the SCSI bus): read as many as we can into SCp.ptr[SCp.have_data_in] and
 * update have_data_in.
 *
 * If FI_FIFO_WRITING, the FIFO has nearly run dry on the 3393 side: shove
 * in more data from SCp.ptr[SCp.have_data_in] (updating have_data_in), or,
 * if the buffer is exhausted, 32 dummy bytes - needed to push out the real
 * data the FIFO is still holding.
 * (Big thanks to Bill Earnest for getting me out of the mud in here.)
 */

		cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
		CHECK_NULL(cmd, "fifo_int")

		if (hostdata->fifo == FI_FIFO_READING) {

			DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))

			sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			i = read1_io(IO_FIFO_COUNT) & 0xfe;
			i <<= 2;	/* # of words waiting in the fifo */
			f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

			FAST_READ2_IO();
#else
			while (i--)
				*sp++ = read2_io(IO_FIFO);

#endif

			/* bytes actually read = words moved * 2 */
			i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			i <<= 1;
			cmd->SCp.have_data_in += i;
		}

		else if (hostdata->fifo == FI_FIFO_WRITING) {

			DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))

/* If all bytes have been written to the fifo, flush out the stragglers.
 * Writing 16 dummy words seems arbitrary, but there's no better choice:
 * we'd like to read the 3393 transfer count to learn how many bytes need
 * flushing, but the TRANSFER_INFO command hasn't completed (not enough
 * bytes!) so that register isn't accessible.  16 words was found by trial
 * and error - and Bill says it's exactly what the Always driver does.
 */

			if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
				i = 16;
				while (i--)	/* write 32 dummy bytes */
					write2_io(0, IO_FIFO);
			}

/* If there are still bytes left in the SCSI buffer, write as many as we
 * can out to the fifo.
 */

			else {
				sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i = cmd->SCp.this_residual - cmd->SCp.have_data_in;	/* bytes yet to go */
				j = read1_io(IO_FIFO_COUNT) & 0xfe;
				j <<= 2;	/* how many words the fifo has room for */
				if ((j << 1) > i)
					j = (i >> 1);
				while (j--)
					write2_io(*sp++, IO_FIFO);

				/* bytes actually written = words moved * 2 */
				i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i <<= 1;
				cmd->SCp.have_data_in += i;
			}
		}

		else {
			printk("*** Spurious FIFO interrupt ***");
		}

		write1_io(0, IO_LED_OFF);

/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
 * may also be asserted, but we don't bother to check it: we get more
 * detailed info from FIFO_READING and FIFO_WRITING (see below).
 */

	cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
	sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear the interrupt */
	phs = read_3393(hostdata, WD_COMMAND_PHASE);

	/* No connected command and not a status that can arrive unconnected:
	 * spurious - bail out. */
	if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
		printk("\nNR:wd-intr-1\n");
		write1_io(0, IO_LED_OFF);

/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

	DB(DB_INTR, printk("{%02x:%02x-", asr, sr))

/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
 * guaranteed to be in response to the completion of the transfer.
 * If we were reading, there's probably data in the fifo that needs
 * to be copied into RAM - do that here. Also, we have to update
 * 'this_residual' and 'ptr' based on the contents of the
 * TRANSFER_COUNT register, in case the device decided to do an
 * intermediate disconnect (a device may do this if it has to
 * do a seek, or just to be nice and let other devices have
 * some bus time during long transfers).
 * After doing whatever is necessary with the fifo, we go on and
 * service the WD3393 interrupt normally.
 */

	if (hostdata->fifo == FI_FIFO_READING) {

		/* buffer index = start-of-buffer + #-of-bytes-already-read */

		sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);

		/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */

		i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
		i >>= 1;	/* Gulp. We assume this will always be modulo 2 */
		f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

		FAST_READ2_IO();
#else
		while (i--)
			*sp++ = read2_io(IO_FIFO);

#endif

		hostdata->fifo = FI_FIFO_UNUSED;
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

	else if (hostdata->fifo == FI_FIFO_WRITING) {
		/* Nothing left in the FIFO to copy; just resync the scatter
		 * pointers from the chip's transfer count. */
		hostdata->fifo = FI_FIFO_UNUSED;
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

/* Respond to the specific WD3393 interrupt - there are quite a few! */

	switch (sr) {

	case CSR_TIMEOUT:
		DB(DB_INTR, printk("TIMEOUT"))

		if (hostdata->state == S_RUNNING_LEVEL2)
			hostdata->connected = NULL;
		else {
			cmd = (Scsi_Cmnd *) hostdata->selecting;	/* get a valid cmd */
			CHECK_NULL(cmd, "csr_timeout")
			hostdata->selecting = NULL;
		}

		cmd->result = DID_NO_CONNECT << 16;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		cmd->scsi_done(cmd);

/* We are not connected to a target - check to see if there
 * are commands waiting to be executed.
 */

		in2000_execute(instance);
		break;

/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_SELECT:
		DB(DB_INTR, printk("SELECT"))
		hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
		CHECK_NULL(cmd, "csr_select")
		hostdata->selecting = NULL;

		/* construct an IDENTIFY message with correct disconnect bit */

		hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
		if (cmd->SCp.phase)
			hostdata->outgoing_msg[0] |= 0x40;	/* allow disconnect */

		if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
#ifdef SYNC_DEBUG
			printk(" sending SDTR ");
#endif

			hostdata->sync_stat[cmd->device->id] = SS_WAITING;

/* tack on a 2nd message to ask about synchronous transfers */

			hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
			hostdata->outgoing_msg[2] = 3;
			hostdata->outgoing_msg[3] = EXTENDED_SDTR;
			hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
			hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
			hostdata->outgoing_len = 6;
		} else
			hostdata->outgoing_len = 1;

		hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_DATA_IN:
	case CSR_UNEXP | PHS_DATA_IN:
	case CSR_SRV_REQ | PHS_DATA_IN:
		DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		transfer_bytes(cmd, DATA_IN_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_DATA_OUT:
	case CSR_UNEXP | PHS_DATA_OUT:
	case CSR_SRV_REQ | PHS_DATA_OUT:
		DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		transfer_bytes(cmd, DATA_OUT_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;

/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_XFER_DONE | PHS_COMMAND:
	case CSR_UNEXP | PHS_COMMAND:
	case CSR_SRV_REQ | PHS_COMMAND:
		DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
		transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
		hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_STATUS:
	case CSR_UNEXP | PHS_STATUS:
	case CSR_SRV_REQ | PHS_STATUS:
		DB(DB_INTR, printk("STATUS="))
		cmd->SCp.Status = read_1_byte(hostdata);
		DB(DB_INTR, printk("%02x", cmd->SCp.Status))
		if (hostdata->level2 >= L2_BASIC) {
			sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */
			hostdata->state = S_RUNNING_LEVEL2;
			write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		} else {
			hostdata->state = S_CONNECTED;
		}
		break;

	case CSR_XFER_DONE | PHS_MESS_IN:
	case CSR_UNEXP | PHS_MESS_IN:
	case CSR_SRV_REQ | PHS_MESS_IN:
		DB(DB_INTR, printk("MSG_IN="))

		msg = read_1_byte(hostdata);
		sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

		/* Accumulate extended messages byte-by-byte in incoming_msg[];
		 * single-byte messages reset the accumulator. */
		hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
		if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
			msg = EXTENDED_MESSAGE;
		else
			hostdata->incoming_ptr = 0;

		cmd->SCp.Message = msg;
		switch (msg) {

		case COMMAND_COMPLETE:
			DB(DB_INTR, printk("CCMP"))
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_CMP_DISC;
			break;

		case SAVE_POINTERS:
			DB(DB_INTR, printk("SDP"))
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case RESTORE_POINTERS:
			DB(DB_INTR, printk("RDP"))
			if (hostdata->level2 >= L2_BASIC) {
				write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
				write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
				hostdata->state = S_RUNNING_LEVEL2;
			} else {
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		case DISCONNECT:
			DB(DB_INTR, printk("DIS"))
			cmd->device->disconnect = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_TMP_DISC;
			break;

		case MESSAGE_REJECT:
			DB(DB_INTR, printk("REJ"))
#ifdef SYNC_DEBUG
			printk("-REJ-");
#endif
			/* Target rejected our SDTR: fall back to async. */
			if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
				hostdata->sync_stat[cmd->device->id] = SS_SET;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case EXTENDED_MESSAGE:
			DB(DB_INTR, printk("EXT"))

			ucp = hostdata->incoming_msg;

#ifdef SYNC_DEBUG
			printk("%02x", ucp[hostdata->incoming_ptr]);
#endif
			/* Is this the last byte of the extended message?
			 * (ucp[1] is the length byte of the extended message) */

			if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {

				switch (ucp[2]) {	/* what's the EXTENDED code? */
				case EXTENDED_SDTR:
					id = calc_sync_xfer(ucp[3], ucp[4]);
					if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {

/* A device has sent an unsolicited SDTR message; rather than go
 * through the effort of decoding it and then figuring out what
 * our reply should be, we're just gonna say that we have a
 * synchronous fifo depth of 0. This will result in asynchronous
 * transfers - not ideal but so much easier.
 * Actually, this is OK because it assures us that if we don't
 * specifically ask for sync transfers, we won't do any.
 */

						write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
						hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
						hostdata->outgoing_msg[1] = 3;
						hostdata->outgoing_msg[2] = EXTENDED_SDTR;
						hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
						hostdata->outgoing_msg[4] = 0;
						hostdata->outgoing_len = 5;
						hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
					} else {
						hostdata->sync_xfer[cmd->device->id] = id;
					}
#ifdef SYNC_DEBUG
					printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
#endif
					hostdata->sync_stat[cmd->device->id] = SS_SET;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				case EXTENDED_WDTR:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("sending WDTR ");
					hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
					hostdata->outgoing_msg[1] = 2;
					hostdata->outgoing_msg[2] = EXTENDED_WDTR;
					hostdata->outgoing_msg[3] = 0;	/* 8 bit transfer width */
					hostdata->outgoing_len = 4;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				default:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
					hostdata->outgoing_msg[0] = MESSAGE_REJECT;
					hostdata->outgoing_len = 1;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				}
				hostdata->incoming_ptr = 0;
			}

			/* We need to read more MESS_IN bytes for the extended message */

			else {
				hostdata->incoming_ptr++;
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		default:
			printk("Rejecting Unknown Message(%02x) ", msg);
			write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
			hostdata->outgoing_msg[0] = MESSAGE_REJECT;
			hostdata->outgoing_len = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
		}
		break;

/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SEL_XFER_DONE:

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (phs == 0x60) {
			DB(DB_INTR, printk("SX-DONE"))
			cmd->SCp.Message = COMMAND_COMPLETE;
			lun = read_3393(hostdata, WD_TARGET_LUN);
			DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
			hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
				cmd->SCp.Status = lun;
			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */

			in2000_execute(instance);
		} else {
			printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
		}
		break;

/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SDP:
		DB(DB_INTR, printk("SDP"))
		hostdata->state = S_RUNNING_LEVEL2;
		write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		break;

	case CSR_XFER_DONE | PHS_MESS_OUT:
	case CSR_UNEXP | PHS_MESS_OUT:
	case CSR_SRV_REQ | PHS_MESS_OUT:
		DB(DB_INTR, printk("MSG_OUT="))

/* To get here, we've probably requested MESSAGE_OUT and have
 * already put the correct bytes in outgoing_msg[] and filled
 * in outgoing_len. We simply send them out to the SCSI bus.
 * Sometimes we get MESSAGE_OUT phase when we're not expecting
 * it - like when our SDTR message is rejected by a target. Some
 * targets send the REJECT before receiving all of the extended
 * message, and then seem to go back to MESSAGE_OUT for a byte
 * or two. Not sure why, or if I'm doing something wrong to
 * cause this to happen. Regardless, it seems that sending
 * NOP messages in these situations results in no harm and
 * makes everyone happy.
 */

		if (hostdata->outgoing_len == 0) {
			hostdata->outgoing_len = 1;
			hostdata->outgoing_msg[0] = NOP;
		}
		transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
		DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
		hostdata->outgoing_len = 0;
		hostdata->state = S_CONNECTED;
		break;

	case CSR_UNEXP_DISC:

/* I think I've seen this after a request-sense that was in response
 * to an error condition, but not sure. We certainly need to do
 * something when we get this interrupt - the question is 'what?'.
 * Let's think positively, and assume some command has finished
 * in a legal manner (like a command that provokes a request-sense),
 * so we treat it as a normal command-complete-disconnect.
 */

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;

/* release the SMP spin_lock and restore irq state */
			spin_unlock_irqrestore(instance->host_lock, flags);
			return IRQ_HANDLED;
		}
		DB(DB_INTR, printk("UNEXP_DISC"))
		hostdata->connected = NULL;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
			cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
		else
			cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
		cmd->scsi_done(cmd);

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */
		in2000_execute(instance);
		break;

	case CSR_DISC:

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		DB(DB_INTR, printk("DISC"))
		if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;
		}
		switch (hostdata->state) {
		case S_PRE_CMP_DISC:
			/* Expected disconnect after COMMAND_COMPLETE: finish
			 * the command. */
			hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			DB(DB_INTR, printk(":%d", cmd->SCp.Status))
			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);
			break;
		case S_PRE_TMP_DISC:
		case S_RUNNING_LEVEL2:
			/* Temporary disconnect: park the command on
			 * disconnected_Q until the target reselects us. */
			cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
			hostdata->disconnected_Q = cmd;
			hostdata->connected = NULL;
			hostdata->state = S_UNCONNECTED;

#ifdef PROC_STATISTICS
			hostdata->disc_done_cnt[cmd->device->id]++;
#endif

			break;
		default:
			printk("*** Unexpected DISCONNECT interrupt! ***");
			hostdata->state = S_UNCONNECTED;
		}

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */
		in2000_execute(instance);
		break;

	case CSR_RESEL_AM:
		DB(DB_INTR, printk("RESEL"))

		/* First we have to make sure this reselection didn't */
		/* happen during Arbitration/Selection of some other device. */
		/* If yes, put losing command back on top of input_Q. */

		if (hostdata->level2 <= L2_NONE) {

			if (hostdata->selecting) {
				cmd = (Scsi_Cmnd *) hostdata->selecting;
				hostdata->selecting = NULL;
				hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
				cmd->host_scribble = (uchar *) hostdata->input_Q;
				hostdata->input_Q = cmd;
			}
		}

		else {

			if (cmd) {
				if (phs == 0x00) {
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
					cmd->host_scribble = (uchar *) hostdata->input_Q;
					hostdata->input_Q = cmd;
				} else {
					/* deliberate hang: unrecoverable bus state */
					printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
					while (1)
						printk("\r");
				}
			}
		}

		/* OK - find out which device reselected us. */

		id = read_3393(hostdata, WD_SOURCE_ID);
		id &= SRCID_MASK;

		/* and extract the lun from the ID message. (Note that we don't
		 * bother to check for a valid message here - I guess this is
		 * not the right way to go, but....)
		 */

		lun = read_3393(hostdata, WD_DATA);
		if (hostdata->level2 < L2_RESELECT)
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
		lun &= 7;

		/* Now we look for the command that's reconnecting. */

		cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
		patch = NULL;
		while (cmd) {
			if (id == cmd->device->id && lun == cmd->device->lun)
				break;
			patch = cmd;
			cmd = (Scsi_Cmnd *) cmd->host_scribble;
		}

		/* Hmm. Couldn't find a valid command.... What to do? */

		if (!cmd) {
			printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
			break;
		}

		/* Ok, found the command - now start it up again.
		 * Unlink it from disconnected_Q first. */

		if (patch)
			patch->host_scribble = cmd->host_scribble;
		else
			hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
		hostdata->connected = cmd;

		/* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
		 * because these things are preserved over a disconnect.
		 * But we DO need to fix the DPD bit so it's correct for this command.
		 */

		if (is_dir_out(cmd))
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
		else
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
		if (hostdata->level2 >= L2_RESELECT) {
			write_3393_count(hostdata, 0);	/* we want a DATA_PHASE interrupt */
			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
			hostdata->state = S_RUNNING_LEVEL2;
		} else
			hostdata->state = S_CONNECTED;

		break;

	default:
		printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
	}

	write1_io(0, IO_LED_OFF);

	DB(DB_INTR, printk("} "))

/* release the SMP spin_lock and restore irq state */
	spin_unlock_irqrestore(instance->host_lock, flags);
	return IRQ_HANDLED;
}

#define RESET_CARD         0	/* reset the WD33c93 only */
#define RESET_CARD_AND_BUS 1	/* also pulse the SCSI bus reset line */
#define B_FLAG 0x80		/* reset_hardware() result: chip is a WD33c93B */

/*
 * reset_hardware() - (re)initialize the card and the WD33c93 chip.
 *
 * Returns the chip's status register value, with bit 0 indicating an
 * A/B-stepping part and B_FLAG or'ed in when the chip is a WD33c93B
 * (detected by whether the QUEUE_TAG register holds a written value).
 *
 * Caller must hold instance lock!
 */
static int reset_hardware(struct Scsi_Host *instance, int type)
{
	struct IN2000_hostdata *hostdata;
	int qt, x;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	write1_io(0, IO_LED_ON);
	if (type == RESET_CARD_AND_BUS) {
		write1_io(0, IO_CARD_RESET);
		x = read1_io(IO_HARDWARE);
	}
	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear any WD intrpt */
	write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));

	write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
	write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
	write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
	/* FIXME: timeout ?? */
	while (!(READ_AUX_STAT() & ASR_INT))
		cpu_relax();	/* wait for RESET to complete */

	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

	write_3393(hostdata, WD_QUEUE_TAG, 0xa5);	/* any random number */
	qt = read_3393(hostdata, WD_QUEUE_TAG);
	if (qt == 0xa5) {
		x |= B_FLAG;	/* QUEUE_TAG readable => WD33c93B part */
		write_3393(hostdata, WD_QUEUE_TAG, 0);
	}
	write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write1_io(0, IO_LED_OFF);
	return x;
}

/*
 * in2000_bus_reset() - eh_bus_reset_handler: reset card+bus and forget
 * every queued, selecting, connected and disconnected command.
 */
static int in2000_bus_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	int x;
	unsigned long flags;

	instance = cmd->device->host;
	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);

	spin_lock_irqsave(instance->host_lock, flags);

	/* do scsi-reset here */
	reset_hardware(instance, RESET_CARD_AND_BUS);
	for (x = 0; x < 8; x++) {
		hostdata->busy[x] = 0;
		hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
		hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
	}
	hostdata->input_Q = NULL;
	hostdata->selecting = NULL;
	hostdata->connected = NULL;
	hostdata->disconnected_Q = NULL;
	hostdata->state = S_UNCONNECTED;
	hostdata->fifo = FI_FIFO_UNUSED;
	hostdata->incoming_ptr = 0;
	hostdata->outgoing_len = 0;

	cmd->result = DID_RESET << 16;

	spin_unlock_irqrestore(instance->host_lock, flags);
	return SUCCESS;
}

/*
 * __in2000_abort() - abort a command; caller holds the host lock.
 *
 * Four cases, in order: (1) still on input_Q -> unlink and complete with
 * DID_ABORT; (2) currently connected -> issue chip ABORT, drain the data
 * register, then DISCONNECT, and complete with DID_ABORT; (3) disconnected
 * from the bus -> give up (FAILED); (4) not found anywhere -> assume it
 * completed in a race with this abort and return SUCCESS.
 */
static int __in2000_abort(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *tmp, *prev;
	uchar sr, asr;
	unsigned long timeout;

	instance = cmd->device->host;
	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
	printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));

/*
 * Case 1 : If the command hasn't been issued yet, we simply remove it
 *     from the inout_Q.
 */

	tmp = (Scsi_Cmnd *) hostdata->input_Q;
	prev = NULL;
	while (tmp) {
		if (tmp == cmd) {
			if (prev)
				prev->host_scribble = cmd->host_scribble;
			cmd->host_scribble = NULL;
			cmd->result = DID_ABORT << 16;
			printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
			cmd->scsi_done(cmd);
			return SUCCESS;
		}
		prev = tmp;
		tmp = (Scsi_Cmnd *) tmp->host_scribble;
	}

/*
 * Case 2 : If the command is connected, we're going to fail the abort
 *     and let the high level SCSI driver retry at a later time or
 *     issue a reset.
 *
 *     Timeouts, and therefore aborted commands, will be highly unlikely
 *     and handling them cleanly in this situation would make the common
 *     case of noresets less efficient, and would pollute our code. So,
 *     we fail.
 */

	if (hostdata->connected == cmd) {

		printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
		printk("sending wd33c93 ABORT command - ");
		write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
		write_3393_cmd(hostdata, WD_CMD_ABORT);

/* Now we have to attempt to flush out the FIFO... */

		printk("flushing fifo - ");
		timeout = 1000000;
		do {
			asr = READ_AUX_STAT();
			if (asr & ASR_DBR)
				read_3393(hostdata, WD_DATA);
		} while (!(asr & ASR_INT) && timeout-- > 0);
		sr = read_3393(hostdata, WD_SCSI_STATUS);
		printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);

		/*
		 * Abort command processed.
		 * Still connected.
		 * We must disconnect.
		 */

		printk("sending wd33c93 DISCONNECT command - ");
		write_3393_cmd(hostdata, WD_CMD_DISCONNECT);

		timeout = 1000000;
		asr = READ_AUX_STAT();
		while ((asr & ASR_CIP) && timeout-- > 0)
			asr = READ_AUX_STAT();
		sr = read_3393(hostdata, WD_SCSI_STATUS);
		printk("asr=%02x, sr=%02x.", asr, sr);

		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->connected = NULL;
		hostdata->state = S_UNCONNECTED;
		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		in2000_execute(instance);

		return SUCCESS;
	}

/*
 * Case 3: If the command is currently disconnected from the bus,
 * we're not going to expend much effort here: Let's just return
 * an ABORT_SNOOZE and hope for the best...
 */

	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
		if (cmd == tmp) {
			printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
			return FAILED;
		}

/*
 * Case 4 : If we reached this point, the command was not found in any of
 *     the queues.
 *
 * We probably reached this point because of an unlikely race condition
 * between the command completing successfully and the abortion code,
 * so we won't panic, but we will notify the user in case something really
 * broke.
 */

	in2000_execute(instance);

	printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no);

	return SUCCESS;
}

/* eh_abort_handler entry point: takes the host lock around __in2000_abort() */
static int in2000_abort(Scsi_Cmnd * cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __in2000_abort(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

#define MAX_IN2000_HOSTS 3
#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
#define SETUP_BUFFER_SIZE 200
/* copy of the command-line/module options string, split in-place into
 * setup_args[] by in2000_setup() */
static char setup_buffer[SETUP_BUFFER_SIZE];
/* per-argument "already consumed by check_setup_args()" flags */
static char setup_used[MAX_SETUP_ARGS];
static int done_setup = 0;

/*
 * in2000_setup() - parse the comma-separated option string into
 * setup_args[].  'ints' is unused (kept for the __setup() signature).
 */
static void __init in2000_setup(char *str, int *ints)
{
	int i;
	char *p1, *p2;

	strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);
	p1 = setup_buffer;
	i = 0;
	while (*p1 && (i < MAX_SETUP_ARGS)) {
		p2 = strchr(p1, ',');
		if (p2) {
			*p2 = '\0';
			if (p1 != p2)
				setup_args[i] = p1;
			p1 = p2 + 1;
			i++;
		} else {
			setup_args[i] = p1;
			break;
		}
	}
	for (i = 0; i < MAX_SETUP_ARGS; i++)
		setup_used[i] = 0;
	done_setup = 1;
}


/* check_setup_args() returns index if key found, 0 if not
 * On success the argument is marked used, and *val is set to the numeric
 * value following "key:" (or -1 when no value was given).
 */
static int __init check_setup_args(char *key, int *val, char *buf)
{
	int x;
	char *cp;

	for (x = 0; x < MAX_SETUP_ARGS; x++) {
		if (setup_used[x])
			continue;
		if (!strncmp(setup_args[x], key, strlen(key)))
			break;
	}
	if (x == MAX_SETUP_ARGS)
		return 0;
	setup_used[x] = 1;
	cp = setup_args[x] + strlen(key);
	*val = -1;
	if (*cp != ':')
		return ++x;
	cp++;
	if ((*cp >= '0') && (*cp <= '9')) {
		*val = simple_strtoul(cp, NULL, 0);
	}
	return ++x;
}


/* The "correct" (ie portable) way to access memory-mapped hardware
 * such as the IN2000 EPROM and dip switch is through the use of
 * special macros declared in 'asm/io.h'. We use readb() and readl()
 * when reading from the card's BIOS area in in2000_detect().
 */
/* candidate BIOS EPROM base addresses (zero-terminated list) */
static u32 bios_tab[] in2000__INITDATA = {
	0xc8000, 0xd0000, 0xd8000, 0
};

/* possible IO base addresses, indexed by the SW_ADDR0/SW_ADDR1 dip bits */
static unsigned short base_tab[] in2000__INITDATA = {
	0x220, 0x200, 0x110, 0x100,
};

/* possible IRQ lines, indexed by the SW_INT0/SW_INT1 dip bits */
static int int_tab[] in2000__INITDATA = {
	15, 14, 11, 10
};

/*
 * probe_bios() - look for an IN2000 BIOS signature at 'addr'.
 * On success returns 1, stores the first signature long in *s1 and the
 * (inverted) dip-switch image shadowed into EPROM space in *switches.
 */
static int probe_bios(u32 addr, u32 *s1, uchar *switches)
{
	void __iomem *p = ioremap(addr, 0x34);
	if (!p)
		return 0;
	*s1 = readl(p + 0x10);
	if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) {
		/* Read the switch image that's mapped into EPROM space */
		*switches = ~readb(p + 0x20);
		iounmap(p);
		return 1;
	}
	iounmap(p);
	return 0;
}

/*
 * in2000_detect() - probe for IN2000 cards and register a Scsi_Host for
 * each one found.  Returns the number of hosts registered.
 */
static int __init in2000_detect(struct scsi_host_template * tpnt)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	int detect_count;
	int bios;
	int x;
	unsigned short base;
	uchar switches;
	uchar hrev;
	unsigned long flags;
	int val;
	char buf[32];

/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
 * pretty straightforward and fool-proof operation. There are 3
 * possible locations for the IN2000 EPROM in memory space - if we
 * find a BIOS signature, we can read the dip switch settings from
 * the byte at BIOS+32 (shadowed in by logic on the card). From 2
 * of the switch bits we get the card's address in IO space. There's
 * an image of the dip switch there, also, so we have a way to back-
 * check that this really is an IN2000 card. Very nifty. Use the
 * 'ioport:xx' command-line parameter if your BIOS EPROM is absent
 * or disabled.
 */

	if (!done_setup && setup_strings)
		in2000_setup(setup_strings, NULL);

	detect_count = 0;
	for (bios = 0; bios_tab[bios]; bios++) {
		u32 s1 = 0;
		if (check_setup_args("ioport", &val, buf)) {
			base = val;
			switches = ~inb(base + IO_SWITCHES) & 0xff;
			printk("Forcing IN2000 detection at IOport 0x%x ", base);
			bios = 2;	/* make this the last loop iteration */
		}

/* There have been a couple of BIOS versions with different layouts
 * for the obvious ID strings. We look for the 2 most common ones and
 * hope that they cover all the cases...
 */
		else if (probe_bios(bios_tab[bios], &s1, &switches)) {
			printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]);

			/* Find out where the IO space is */

			x = switches & (SW_ADDR0 | SW_ADDR1);
			base = base_tab[x];

			/* Check for the IN2000 signature in IO space. */

			x = ~inb(base + IO_SWITCHES) & 0xff;
			if (x != switches) {
				printk("Bad IO signature: %02x vs %02x.\n", x, switches);
				continue;
			}
		} else
			continue;

		/* OK. We have a base address for the IO ports - run a few safety checks */

		if (!(switches & SW_BIT7)) {	/* I _think_ all cards do this */
			printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base);
			continue;
		}

		/* Let's assume any hardware version will work, although the driver
		 * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
		 * print out the rev number for reference later, but accept them all.
		 */

		hrev = inb(base + IO_HARDWARE);

		/* Bit 2 tells us if interrupts are disabled */
		if (switches & SW_DISINT) {
			printk("The IN-2000 SCSI card at IOport 0x%03x ", base);
			printk("is not configured for interrupt operation!\n");
			printk("This driver requires an interrupt: cancelling detection.\n");
			continue;
		}

		/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
		 * initialize it.
		 */

		tpnt->proc_name = "in2000";
		instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
		if (instance == NULL)
			continue;
		detect_count++;
		hostdata = (struct IN2000_hostdata *) instance->hostdata;
		instance->io_port = hostdata->io_base = base;
		hostdata->dip_switch = switches;
		hostdata->hrev = hrev;

		write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
		write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
		write1_io(0, IO_INTR_MASK);	/* allow all ints */
		x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
		if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
			printk("in2000_detect: Unable to allocate IRQ.\n");
			detect_count--;
			continue;
		}
		instance->irq = x;
		instance->n_io_port = 13;
		request_region(base, 13, "in2000");	/* lock in this IO space for our use */

		for (x = 0; x < 8; x++) {
			hostdata->busy[x] = 0;
			hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
			hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
#ifdef PROC_STATISTICS
			hostdata->cmd_cnt[x] = 0;
			hostdata->disc_allowed_cnt[x] = 0;
			hostdata->disc_done_cnt[x] = 0;
#endif
		}
		hostdata->input_Q = NULL;
		hostdata->selecting = NULL;
		hostdata->connected = NULL;
		hostdata->disconnected_Q = NULL;
		hostdata->state = S_UNCONNECTED;
		hostdata->fifo = FI_FIFO_UNUSED;
		hostdata->level2 = L2_BASIC;
		hostdata->disconnect = DIS_ADAPTIVE;
		hostdata->args = DEBUG_DEFAULTS;
		hostdata->incoming_ptr = 0;
		hostdata->outgoing_len = 0;
		hostdata->default_sx_per = DEFAULT_SX_PER;

		/* Older BIOS's had a 'sync on/off' switch - use its setting */

		if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5))
			hostdata->sync_off = 0x00;	/* sync defaults to on */
		else
			hostdata->sync_off = 0xff;	/* sync defaults to off */

#ifdef PROC_INTERFACE
		hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
#ifdef PROC_STATISTICS
		hostdata->int_cnt = 0;
#endif
#endif

		/* Apply any remaining command-line overrides. */

		if (check_setup_args("nosync", &val, buf))
			hostdata->sync_off = val;

		if (check_setup_args("period", &val, buf))
			hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns;

		if (check_setup_args("disconnect", &val, buf)) {
			if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
				hostdata->disconnect = val;
			else
				hostdata->disconnect = DIS_ADAPTIVE;
		}

		if (check_setup_args("noreset", &val, buf))
			hostdata->args ^= A_NO_SCSI_RESET;

		if (check_setup_args("level2", &val, buf))
			hostdata->level2 = val;

		if (check_setup_args("debug", &val, buf))
			hostdata->args = (val & DB_MASK);

#ifdef PROC_INTERFACE
		if (check_setup_args("proc", &val, buf))
			hostdata->proc = val;
#endif

		/* FIXME: not strictly needed I think but the called code expects
		   to be locked */
		spin_lock_irqsave(instance->host_lock, flags);
		x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS);
		spin_unlock_irqrestore(instance->host_lock, flags);

		hostdata->microcode = read_3393(hostdata, WD_CDB_1);
		if (x & 0x01) {
			if (x & B_FLAG)
				hostdata->chip = C_WD33C93B;
			else
				hostdata->chip = C_WD33C93A;
		} else
			hostdata->chip = C_WD33C93;

		printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No");
		printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode);
#ifdef DEBUGGING_ON
		printk("setup_args = ");
		for (x = 0; x < MAX_SETUP_ARGS; x++)
			printk("%s,", setup_args[x]);
		printk("\n");
#endif
		if (hostdata->sync_off == 0xff)
			printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
		printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE);
	}
	return detect_count;
}

/* release the IRQ and IO region claimed by in2000_detect() */
static int in2000_release(struct Scsi_Host *shost)
{
	if (shost->irq)
		free_irq(shost->irq, shost);
	if (shost->io_port && shost->n_io_port)
		release_region(shost->io_port, shost->n_io_port);
	return 0;
}

/* NOTE: I lifted this function straight out of the old driver,
 *       and have not tested it.  Presumably it does what it's
 *       supposed to do...
 */

static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo)
{
	int size;

	size = capacity;
	iinfo[0] = 64;	/* heads */
	iinfo[1] = 32;	/* sectors */
	iinfo[2] = size >> 11;	/* cylinders = capacity / (64*32) */

	/* This should approximate the large drive handling that the DOS ASPI manager
	   uses.  Drives very near the boundaries may not be handled correctly (i.e.
	   near 2.0 Gb and 4.0 Gb) */

	if (iinfo[2] > 1024) {
		iinfo[0] = 64;
		iinfo[1] = 63;
		iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
	}
	if (iinfo[2] > 1024) {
		iinfo[0] = 128;
		iinfo[1] = 63;
		iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
	}
	if (iinfo[2] > 1024) {
		iinfo[0] = 255;
		iinfo[1] = 63;
		iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
	}
	return 0;
}

/* /proc read/write handler; continues past the end of this view */
static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in)
{

#ifdef PROC_INTERFACE

	char *bp;
	char tbuf[128];
	unsigned long flags;
	struct IN2000_hostdata *hd;
	Scsi_Cmnd *cmd;
	int x, i;
	static int stop = 0;

	hd = (struct IN2000_hostdata *) instance->hostdata;

/* If 'in' is TRUE we need to _read_ the proc file.  We accept the following
 * keywords (same format as command-line, but only ONE per read):
 *    debug
 *    disconnect
 *    period
 *    resync
 *    proc
 */

	if (in) {
		buf[len] = '\0';
		bp = buf;
		if (!strncmp(bp, "debug:", 6)) {
			bp += 6;
			hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
		} else if (!strncmp(bp, "disconnect:", 11)) {
			bp += 11;
			x = simple_strtoul(bp, NULL, 0);
			if (x < DIS_NEVER || x > DIS_ALWAYS)
				x = DIS_ADAPTIVE;
			hd->disconnect = x;
		} else if (!strncmp(bp, "period:", 7)) {
			bp += 7;
			x = simple_strtoul(bp, NULL, 0);
			hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
		} else if (!strncmp(bp, "resync:", 7)) {
			bp += 7;
			x = simple_strtoul(bp, NULL, 0);
			for (i = 0; i < 7; i++)
				if (x & (1 << i))
					hd->sync_stat[i] = SS_UNSET;
		} else if (!strncmp(bp, "proc:", 5)) {
			bp += 5;
			hd->proc = simple_strtoul(bp, NULL, 0);
		} else if (!strncmp(bp, "level2:", 7)) {
			bp += 7;
			hd->level2 = simple_strtoul(bp, NULL, 0);
		}
		return len;
	}

	spin_lock_irqsave(instance->host_lock, flags);
	bp = buf;
	*bp = '\0';
	if (hd->proc & PR_VERSION) {
		sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
		strcat(bp, tbuf);
	}
	if (hd->proc & PR_INFO) {
		sprintf(tbuf, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ?
"Yes" : "No"); strcat(bp, tbuf); strcat(bp, "\nsync_xfer[] = "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%02x", hd->sync_xfer[x]); strcat(bp, tbuf); } strcat(bp, "\nsync_stat[] = "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%02x", hd->sync_stat[x]); strcat(bp, tbuf); } } #ifdef PROC_STATISTICS if (hd->proc & PR_STATISTICS) { strcat(bp, "\ncommands issued: "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]); strcat(bp, tbuf); } strcat(bp, "\ndisconnects allowed:"); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]); strcat(bp, tbuf); } strcat(bp, "\ndisconnects done: "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]); strcat(bp, tbuf); } sprintf(tbuf, "\ninterrupts: \t%ld", hd->int_cnt); strcat(bp, tbuf); } #endif if (hd->proc & PR_CONNECTED) { strcat(bp, "\nconnected: "); if (hd->connected) { cmd = (Scsi_Cmnd *) hd->connected; sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); } } if (hd->proc & PR_INPUTQ) { strcat(bp, "\ninput_Q: "); cmd = (Scsi_Cmnd *) hd->input_Q; while (cmd) { sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_DISCQ) { strcat(bp, "\ndisconnected_Q:"); cmd = (Scsi_Cmnd *) hd->disconnected_Q; while (cmd) { sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (Scsi_Cmnd *) cmd->host_scribble; } } if (hd->proc & PR_TEST) { ; /* insert your own custom function here */ } strcat(bp, "\n"); spin_unlock_irqrestore(instance->host_lock, flags); *start = buf; if (stop) { stop = 0; return 0; /* return 0 to signal end-of-file */ } if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */ stop = 1; if (hd->proc & PR_STOP) /* stop every other time */ stop = 1; return strlen(bp); #else /* PROC_INTERFACE */ return 0; #endif /* PROC_INTERFACE */ } MODULE_LICENSE("GPL"); static 
struct scsi_host_template driver_template = { .proc_name = "in2000", .proc_info = in2000_proc_info, .name = "Always IN2000", .detect = in2000_detect, .release = in2000_release, .queuecommand = in2000_queuecommand, .eh_abort_handler = in2000_abort, .eh_bus_reset_handler = in2000_bus_reset, .bios_param = in2000_biosparam, .can_queue = IN2000_CAN_Q, .this_id = IN2000_HOST_ID, .sg_tablesize = IN2000_SG, .cmd_per_lun = IN2000_CPL, .use_clustering = DISABLE_CLUSTERING, }; #include "scsi_module.c"
gpl-2.0
muryliang/linux
arch/arm/mach-omap2/omap_hwmod_common_data.c
4574
1979
/* * omap_hwmod common data structures * * Copyright (C) 2010 Texas Instruments, Inc. * Thara Gopinath <thara@ti.com> * Benoît Cousson * * Copyright (C) 2010 Nokia Corporation * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This data/structures are to be used while defining OMAP on-chip module * data and their integration with other OMAP modules and Linux. */ #include "omap_hwmod.h" #include "omap_hwmod_common_data.h" /** * struct omap_hwmod_sysc_type1 - TYPE1 sysconfig scheme. * * To be used by hwmod structure to specify the sysconfig offsets * if the device ip is compliant with the original PRCM protocol * defined for OMAP2420. */ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type1 = { .midle_shift = SYSC_TYPE1_MIDLEMODE_SHIFT, .clkact_shift = SYSC_TYPE1_CLOCKACTIVITY_SHIFT, .sidle_shift = SYSC_TYPE1_SIDLEMODE_SHIFT, .enwkup_shift = SYSC_TYPE1_ENAWAKEUP_SHIFT, .srst_shift = SYSC_TYPE1_SOFTRESET_SHIFT, .autoidle_shift = SYSC_TYPE1_AUTOIDLE_SHIFT, }; /** * struct omap_hwmod_sysc_type2 - TYPE2 sysconfig scheme. * * To be used by hwmod structure to specify the sysconfig offsets if the * device ip is compliant with the new PRCM protocol defined for new * OMAP4 IPs. */ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = { .midle_shift = SYSC_TYPE2_MIDLEMODE_SHIFT, .sidle_shift = SYSC_TYPE2_SIDLEMODE_SHIFT, .srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT, .dmadisable_shift = SYSC_TYPE2_DMADISABLE_SHIFT, }; /** * struct omap_hwmod_sysc_type3 - TYPE3 sysconfig scheme. * Used by some IPs on AM33xx */ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3 = { .midle_shift = SYSC_TYPE3_MIDLEMODE_SHIFT, .sidle_shift = SYSC_TYPE3_SIDLEMODE_SHIFT, }; struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = { .manager_count = 2, .has_framedonetv_irq = 0 };
gpl-2.0
dezelin/kvm
drivers/video/mb862xx/mb862xx-i2c.c
4574
3816
/* * Coral-P(A)/Lime I2C adapter driver * * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/fb.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/export.h> #include "mb862xxfb.h" #include "mb862xx_reg.h" static int mb862xx_i2c_wait_event(struct i2c_adapter *adap) { struct mb862xxfb_par *par = adap->algo_data; u32 reg; do { udelay(10); reg = inreg(i2c, GC_I2C_BCR); if (reg & (I2C_INT | I2C_BER)) break; } while (1); return (reg & I2C_BER) ? 0 : 1; } static int mb862xx_i2c_do_address(struct i2c_adapter *adap, int addr) { struct mb862xxfb_par *par = adap->algo_data; outreg(i2c, GC_I2C_DAR, addr); outreg(i2c, GC_I2C_CCR, I2C_CLOCK_AND_ENABLE); outreg(i2c, GC_I2C_BCR, par->i2c_rs ? I2C_REPEATED_START : I2C_START); if (!mb862xx_i2c_wait_event(adap)) return -EIO; par->i2c_rs = !(inreg(i2c, GC_I2C_BSR) & I2C_LRB); return par->i2c_rs; } static int mb862xx_i2c_write_byte(struct i2c_adapter *adap, u8 byte) { struct mb862xxfb_par *par = adap->algo_data; outreg(i2c, GC_I2C_DAR, byte); outreg(i2c, GC_I2C_BCR, I2C_START); if (!mb862xx_i2c_wait_event(adap)) return -EIO; return !(inreg(i2c, GC_I2C_BSR) & I2C_LRB); } static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last) { struct mb862xxfb_par *par = adap->algo_data; outreg(i2c, GC_I2C_BCR, I2C_START | (last ? 
0 : I2C_ACK)); if (!mb862xx_i2c_wait_event(adap)) return 0; *byte = inreg(i2c, GC_I2C_DAR); return 1; } static void mb862xx_i2c_stop(struct i2c_adapter *adap) { struct mb862xxfb_par *par = adap->algo_data; outreg(i2c, GC_I2C_BCR, I2C_STOP); outreg(i2c, GC_I2C_CCR, I2C_DISABLE); par->i2c_rs = 0; } static int mb862xx_i2c_read(struct i2c_adapter *adap, struct i2c_msg *m) { int i, ret = 0; int last = m->len - 1; for (i = 0; i < m->len; i++) { if (!mb862xx_i2c_read_byte(adap, &m->buf[i], i == last)) { ret = -EIO; break; } } return ret; } static int mb862xx_i2c_write(struct i2c_adapter *adap, struct i2c_msg *m) { int i, ret = 0; for (i = 0; i < m->len; i++) { if (!mb862xx_i2c_write_byte(adap, m->buf[i])) { ret = -EIO; break; } } return ret; } static int mb862xx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct mb862xxfb_par *par = adap->algo_data; struct i2c_msg *m; int addr; int i = 0, err = 0; dev_dbg(par->dev, "%s: %d msgs\n", __func__, num); for (i = 0; i < num; i++) { m = &msgs[i]; if (!m->len) { dev_dbg(par->dev, "%s: null msgs\n", __func__); continue; } addr = m->addr; if (m->flags & I2C_M_RD) addr |= 1; err = mb862xx_i2c_do_address(adap, addr); if (err < 0) break; if (m->flags & I2C_M_RD) err = mb862xx_i2c_read(adap, m); else err = mb862xx_i2c_write(adap, m); } if (i) mb862xx_i2c_stop(adap); return (err < 0) ? 
err : i; } static u32 mb862xx_func(struct i2c_adapter *adap) { return I2C_FUNC_SMBUS_BYTE_DATA; } static const struct i2c_algorithm mb862xx_algo = { .master_xfer = mb862xx_xfer, .functionality = mb862xx_func, }; static struct i2c_adapter mb862xx_i2c_adapter = { .name = "MB862xx I2C adapter", .algo = &mb862xx_algo, .owner = THIS_MODULE, }; int mb862xx_i2c_init(struct mb862xxfb_par *par) { int ret; mb862xx_i2c_adapter.algo_data = par; par->adap = &mb862xx_i2c_adapter; ret = i2c_add_adapter(par->adap); if (ret < 0) { dev_err(par->dev, "failed to add %s\n", mb862xx_i2c_adapter.name); } return ret; } void mb862xx_i2c_exit(struct mb862xxfb_par *par) { if (par->adap) { i2c_del_adapter(par->adap); par->adap = NULL; } }
gpl-2.0
warped-rudi/linux-sunxi
arch/arm/mach-prima2/prima2.c
4830
1063
/* * Defines machines for CSR SiRFprimaII * * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. * * Licensed under GPLv2 or later. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/sizes.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <linux/of.h> #include <linux/of_platform.h> #include "common.h" static struct of_device_id sirfsoc_of_bus_ids[] __initdata = { { .compatible = "simple-bus", }, {}, }; void __init sirfsoc_mach_init(void) { of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL); } static const char *prima2cb_dt_match[] __initdata = { "sirf,prima2-cb", NULL }; MACHINE_START(PRIMA2_EVB, "prima2cb") /* Maintainer: Barry Song <baohua.song@csr.com> */ .atag_offset = 0x100, .init_early = sirfsoc_of_clk_init, .map_io = sirfsoc_map_lluart, .init_irq = sirfsoc_of_irq_init, .timer = &sirfsoc_timer, .dma_zone_size = SZ_256M, .init_machine = sirfsoc_mach_init, .dt_compat = prima2cb_dt_match, .restart = sirfsoc_restart, MACHINE_END
gpl-2.0
dorimanx/DORIMANX_LG_STOCK_LP_KERNEL
drivers/media/rc/streamzap.c
5086
14135
/* * Streamzap Remote Control driver * * Copyright (c) 2005 Christoph Bartelmus <lirc@bartelmus.de> * Copyright (c) 2010 Jarod Wilson <jarod@wilsonet.com> * * This driver was based on the work of Greg Wickham and Adrian * Dewhurst. It was substantially rewritten to support correct signal * gaps and now maintains a delay buffer, which is used to present * consistent timing behaviour to user space applications. Without the * delay buffer an ugly hack would be required in lircd, which can * cause sluggish signal decoding in certain situations. * * Ported to in-kernel ir-core interface by Jarod Wilson * * This driver is based on the USB skeleton driver packaged with the * kernel; copyright (C) 2001-2003 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> #define DRIVER_VERSION "1.61" #define DRIVER_NAME "streamzap" #define DRIVER_DESC "Streamzap Remote Control driver" #ifdef CONFIG_USB_DEBUG static bool debug = 1; #else static bool debug; #endif #define USB_STREAMZAP_VENDOR_ID 0x0e9c #define USB_STREAMZAP_PRODUCT_ID 0x0000 /* table of devices that work with this driver */ static struct usb_device_id streamzap_table[] = { /* Streamzap Remote Control */ { USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) }, /* Terminating entry */ { } }; MODULE_DEVICE_TABLE(usb, streamzap_table); #define SZ_PULSE_MASK 0xf0 #define SZ_SPACE_MASK 0x0f #define SZ_TIMEOUT 0xff #define SZ_RESOLUTION 256 /* number of samples buffered */ #define SZ_BUF_LEN 128 /* from ir-rc5-sz-decoder.c */ #ifdef CONFIG_IR_RC5_SZ_DECODER_MODULE #define load_rc5_sz_decode() request_module("ir-rc5-sz-decoder") #else #define load_rc5_sz_decode() {} #endif enum StreamzapDecoderState { PulseSpace, FullPulse, FullSpace, IgnorePulse }; /* structure to hold our device specific stuff */ struct streamzap_ir { /* ir-core */ struct rc_dev *rdev; /* core device info */ struct device *dev; /* usb */ struct usb_device *usbdev; struct usb_interface *interface; struct usb_endpoint_descriptor *endpoint; struct urb *urb_in; /* buffer & dma */ unsigned char *buf_in; dma_addr_t dma_in; unsigned int buf_in_len; /* track what state we're in */ enum StreamzapDecoderState decoder_state; /* tracks whether we are currently receiving some signal */ bool idle; /* sum of signal lengths received since signal start */ unsigned long sum; /* start time of signal; necessary for gap tracking */ struct timeval signal_last; 
struct timeval signal_start; bool timeout_enabled; char name[128]; char phys[64]; }; /* local function prototypes */ static int streamzap_probe(struct usb_interface *interface, const struct usb_device_id *id); static void streamzap_disconnect(struct usb_interface *interface); static void streamzap_callback(struct urb *urb); static int streamzap_suspend(struct usb_interface *intf, pm_message_t message); static int streamzap_resume(struct usb_interface *intf); /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver streamzap_driver = { .name = DRIVER_NAME, .probe = streamzap_probe, .disconnect = streamzap_disconnect, .suspend = streamzap_suspend, .resume = streamzap_resume, .id_table = streamzap_table, }; static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir) { dev_dbg(sz->dev, "Storing %s with duration %u us\n", (rawir.pulse ? "pulse" : "space"), rawir.duration); ir_raw_event_store_with_filter(sz->rdev, &rawir); } static void sz_push_full_pulse(struct streamzap_ir *sz, unsigned char value) { DEFINE_IR_RAW_EVENT(rawir); if (sz->idle) { long deltv; sz->signal_last = sz->signal_start; do_gettimeofday(&sz->signal_start); deltv = sz->signal_start.tv_sec - sz->signal_last.tv_sec; rawir.pulse = false; if (deltv > 15) { /* really long time */ rawir.duration = IR_MAX_DURATION; } else { rawir.duration = (int)(deltv * 1000000 + sz->signal_start.tv_usec - sz->signal_last.tv_usec); rawir.duration -= sz->sum; rawir.duration = US_TO_NS(rawir.duration); rawir.duration &= IR_MAX_DURATION; } sz_push(sz, rawir); sz->idle = false; sz->sum = 0; } rawir.pulse = true; rawir.duration = ((int) value) * SZ_RESOLUTION; rawir.duration += SZ_RESOLUTION / 2; sz->sum += rawir.duration; rawir.duration = US_TO_NS(rawir.duration); rawir.duration &= IR_MAX_DURATION; sz_push(sz, rawir); } static void sz_push_half_pulse(struct streamzap_ir *sz, unsigned char value) { sz_push_full_pulse(sz, (value & SZ_PULSE_MASK) >> 4); } static void 
sz_push_full_space(struct streamzap_ir *sz, unsigned char value) { DEFINE_IR_RAW_EVENT(rawir); rawir.pulse = false; rawir.duration = ((int) value) * SZ_RESOLUTION; rawir.duration += SZ_RESOLUTION / 2; sz->sum += rawir.duration; rawir.duration = US_TO_NS(rawir.duration); sz_push(sz, rawir); } static void sz_push_half_space(struct streamzap_ir *sz, unsigned long value) { sz_push_full_space(sz, value & SZ_SPACE_MASK); } /** * streamzap_callback - usb IRQ handler callback * * This procedure is invoked on reception of data from * the usb remote. */ static void streamzap_callback(struct urb *urb) { struct streamzap_ir *sz; unsigned int i; int len; if (!urb) return; sz = urb->context; len = urb->actual_length; switch (urb->status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* * this urb is terminated, clean up. * sz might already be invalid at this point */ dev_err(sz->dev, "urb terminated, status: %d\n", urb->status); return; default: break; } dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len); for (i = 0; i < len; i++) { dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n", i, (unsigned char)sz->buf_in[i]); switch (sz->decoder_state) { case PulseSpace: if ((sz->buf_in[i] & SZ_PULSE_MASK) == SZ_PULSE_MASK) { sz->decoder_state = FullPulse; continue; } else if ((sz->buf_in[i] & SZ_SPACE_MASK) == SZ_SPACE_MASK) { sz_push_half_pulse(sz, sz->buf_in[i]); sz->decoder_state = FullSpace; continue; } else { sz_push_half_pulse(sz, sz->buf_in[i]); sz_push_half_space(sz, sz->buf_in[i]); } break; case FullPulse: sz_push_full_pulse(sz, sz->buf_in[i]); sz->decoder_state = IgnorePulse; break; case FullSpace: if (sz->buf_in[i] == SZ_TIMEOUT) { DEFINE_IR_RAW_EVENT(rawir); rawir.pulse = false; rawir.duration = sz->rdev->timeout; sz->idle = true; if (sz->timeout_enabled) sz_push(sz, rawir); ir_raw_event_handle(sz->rdev); ir_raw_event_reset(sz->rdev); } else { sz_push_full_space(sz, sz->buf_in[i]); } sz->decoder_state = PulseSpace; break; case IgnorePulse: if ((sz->buf_in[i] & 
SZ_SPACE_MASK) == SZ_SPACE_MASK) { sz->decoder_state = FullSpace; continue; } sz_push_half_space(sz, sz->buf_in[i]); sz->decoder_state = PulseSpace; break; } } ir_raw_event_handle(sz->rdev); usb_submit_urb(urb, GFP_ATOMIC); return; } static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz) { struct rc_dev *rdev; struct device *dev = sz->dev; int ret; rdev = rc_allocate_device(); if (!rdev) { dev_err(dev, "remote dev allocation failed\n"); goto out; } snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared " "Receiver (%04x:%04x)", le16_to_cpu(sz->usbdev->descriptor.idVendor), le16_to_cpu(sz->usbdev->descriptor.idProduct)); usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys)); strlcat(sz->phys, "/input0", sizeof(sz->phys)); rdev->input_name = sz->name; rdev->input_phys = sz->phys; usb_to_input_id(sz->usbdev, &rdev->input_id); rdev->dev.parent = dev; rdev->priv = sz; rdev->driver_type = RC_DRIVER_IR_RAW; rdev->allowed_protos = RC_TYPE_ALL; rdev->driver_name = DRIVER_NAME; rdev->map_name = RC_MAP_STREAMZAP; ret = rc_register_device(rdev); if (ret < 0) { dev_err(dev, "remote input device register failed\n"); goto out; } return rdev; out: rc_free_device(rdev); return NULL; } /** * streamzap_probe * * Called by usb-core to associated with a candidate device * On any failure the return value is the ERROR * On success return 0 */ static int __devinit streamzap_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(intf); struct usb_host_interface *iface_host; struct streamzap_ir *sz = NULL; char buf[63], name[128] = ""; int retval = -ENOMEM; int pipe, maxp; /* Allocate space for device driver specific data */ sz = kzalloc(sizeof(struct streamzap_ir), GFP_KERNEL); if (!sz) return -ENOMEM; sz->usbdev = usbdev; sz->interface = intf; /* Check to ensure endpoint information matches requirements */ iface_host = intf->cur_altsetting; if (iface_host->desc.bNumEndpoints != 1) { dev_err(&intf->dev, 
"%s: Unexpected desc.bNumEndpoints (%d)\n", __func__, iface_host->desc.bNumEndpoints); retval = -ENODEV; goto free_sz; } sz->endpoint = &(iface_host->endpoint[0].desc); if ((sz->endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != USB_DIR_IN) { dev_err(&intf->dev, "%s: endpoint doesn't match input device " "02%02x\n", __func__, sz->endpoint->bEndpointAddress); retval = -ENODEV; goto free_sz; } if ((sz->endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_INT) { dev_err(&intf->dev, "%s: endpoint attributes don't match xfer " "02%02x\n", __func__, sz->endpoint->bmAttributes); retval = -ENODEV; goto free_sz; } pipe = usb_rcvintpipe(usbdev, sz->endpoint->bEndpointAddress); maxp = usb_maxpacket(usbdev, pipe, usb_pipeout(pipe)); if (maxp == 0) { dev_err(&intf->dev, "%s: endpoint Max Packet Size is 0!?!\n", __func__); retval = -ENODEV; goto free_sz; } /* Allocate the USB buffer and IRQ URB */ sz->buf_in = usb_alloc_coherent(usbdev, maxp, GFP_ATOMIC, &sz->dma_in); if (!sz->buf_in) goto free_sz; sz->urb_in = usb_alloc_urb(0, GFP_KERNEL); if (!sz->urb_in) goto free_buf_in; sz->dev = &intf->dev; sz->buf_in_len = maxp; if (usbdev->descriptor.iManufacturer && usb_string(usbdev, usbdev->descriptor.iManufacturer, buf, sizeof(buf)) > 0) strlcpy(name, buf, sizeof(name)); if (usbdev->descriptor.iProduct && usb_string(usbdev, usbdev->descriptor.iProduct, buf, sizeof(buf)) > 0) snprintf(name + strlen(name), sizeof(name) - strlen(name), " %s", buf); sz->rdev = streamzap_init_rc_dev(sz); if (!sz->rdev) goto rc_dev_fail; sz->idle = true; sz->decoder_state = PulseSpace; /* FIXME: don't yet have a way to set this */ sz->timeout_enabled = true; sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) & IR_MAX_DURATION) | 0x03000000); #if 0 /* not yet supported, depends on patches from maxim */ /* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */ sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION); sz->max_timeout = US_TO_NS(SZ_TIMEOUT * 
SZ_RESOLUTION); #endif do_gettimeofday(&sz->signal_start); /* Complete final initialisations */ usb_fill_int_urb(sz->urb_in, usbdev, pipe, sz->buf_in, maxp, (usb_complete_t)streamzap_callback, sz, sz->endpoint->bInterval); sz->urb_in->transfer_dma = sz->dma_in; sz->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_set_intfdata(intf, sz); if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) dev_err(sz->dev, "urb submit failed\n"); dev_info(sz->dev, "Registered %s on usb%d:%d\n", name, usbdev->bus->busnum, usbdev->devnum); /* Load the streamzap not-quite-rc5 decoder too */ load_rc5_sz_decode(); return 0; rc_dev_fail: usb_free_urb(sz->urb_in); free_buf_in: usb_free_coherent(usbdev, maxp, sz->buf_in, sz->dma_in); free_sz: kfree(sz); return retval; } /** * streamzap_disconnect * * Called by the usb core when the device is removed from the system. * * This routine guarantees that the driver will not submit any more urbs * by clearing dev->usbdev. It is also supposed to terminate any currently * active urbs. Unfortunately, usb_bulk_msg(), used in streamzap_read(), * does not provide any way to do this. 
*/ static void streamzap_disconnect(struct usb_interface *interface) { struct streamzap_ir *sz = usb_get_intfdata(interface); struct usb_device *usbdev = interface_to_usbdev(interface); usb_set_intfdata(interface, NULL); if (!sz) return; sz->usbdev = NULL; rc_unregister_device(sz->rdev); usb_kill_urb(sz->urb_in); usb_free_urb(sz->urb_in); usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in); kfree(sz); } static int streamzap_suspend(struct usb_interface *intf, pm_message_t message) { struct streamzap_ir *sz = usb_get_intfdata(intf); usb_kill_urb(sz->urb_in); return 0; } static int streamzap_resume(struct usb_interface *intf) { struct streamzap_ir *sz = usb_get_intfdata(intf); if (usb_submit_urb(sz->urb_in, GFP_ATOMIC)) { dev_err(sz->dev, "Error sumbiting urb\n"); return -EIO; } return 0; } module_usb_driver(streamzap_driver); MODULE_AUTHOR("Jarod Wilson <jarod@wilsonet.com>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable debugging messages");
gpl-2.0
lyfkevin/lge-kernel-iproj
drivers/media/video/saa7164/saa7164-core.c
7134
42124
/* * Driver for the NXP SAA7164 PCIe bridge * * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <asm/div64.h> #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #endif #include "saa7164.h" MODULE_DESCRIPTION("Driver for NXP SAA7164 based TV cards"); MODULE_AUTHOR("Steven Toth <stoth@kernellabs.com>"); MODULE_LICENSE("GPL"); /* * 1 Basic * 2 * 4 i2c * 8 api * 16 cmd * 32 bus */ unsigned int saa_debug; module_param_named(debug, saa_debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); unsigned int fw_debug; module_param(fw_debug, int, 0644); MODULE_PARM_DESC(fw_debug, "Firware debug level def:2"); unsigned int encoder_buffers = SAA7164_MAX_ENCODER_BUFFERS; module_param(encoder_buffers, int, 0644); MODULE_PARM_DESC(encoder_buffers, "Total buffers in read queue 16-512 def:64"); unsigned int vbi_buffers = SAA7164_MAX_VBI_BUFFERS; module_param(vbi_buffers, int, 0644); MODULE_PARM_DESC(vbi_buffers, "Total buffers in read queue 16-512 def:64"); unsigned int waitsecs = 10; module_param(waitsecs, int, 0644); MODULE_PARM_DESC(waitsecs, 
"timeout on firmware messages"); static unsigned int card[] = {[0 ... (SAA7164_MAXBOARDS - 1)] = UNSET }; module_param_array(card, int, NULL, 0444); MODULE_PARM_DESC(card, "card type"); unsigned int print_histogram = 64; module_param(print_histogram, int, 0644); MODULE_PARM_DESC(print_histogram, "print histogram values once"); unsigned int crc_checking = 1; module_param(crc_checking, int, 0644); MODULE_PARM_DESC(crc_checking, "enable crc sanity checking on buffers"); unsigned int guard_checking = 1; module_param(guard_checking, int, 0644); MODULE_PARM_DESC(guard_checking, "enable dma sanity checking for buffer overruns"); static unsigned int saa7164_devcount; static DEFINE_MUTEX(devlist); LIST_HEAD(saa7164_devlist); #define INT_SIZE 16 void saa7164_dumphex16FF(struct saa7164_dev *dev, u8 *buf, int len) { int i; u8 tmp[16]; memset(&tmp[0], 0xff, sizeof(tmp)); printk(KERN_INFO "--------------------> " "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n"); for (i = 0; i < len; i += 16) { if (memcmp(&tmp, buf + i, sizeof(tmp)) != 0) { printk(KERN_INFO " [0x%08x] " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, *(buf+i+0), *(buf+i+1), *(buf+i+2), *(buf+i+3), *(buf+i+4), *(buf+i+5), *(buf+i+6), *(buf+i+7), *(buf+i+8), *(buf+i+9), *(buf+i+10), *(buf+i+11), *(buf+i+12), *(buf+i+13), *(buf+i+14), *(buf+i+15)); } } } static void saa7164_pack_verifier(struct saa7164_buffer *buf) { u8 *p = (u8 *)buf->cpu; int i; for (i = 0; i < buf->actual_size; i += 2048) { if ((*(p + i + 0) != 0x00) || (*(p + i + 1) != 0x00) || (*(p + i + 2) != 0x01) || (*(p + i + 3) != 0xBA)) { printk(KERN_ERR "No pack at 0x%x\n", i); #if 0 saa7164_dumphex16FF(buf->port->dev, (p + i), 32); #endif } } } #define FIXED_VIDEO_PID 0xf1 #define FIXED_AUDIO_PID 0xf2 static void saa7164_ts_verifier(struct saa7164_buffer *buf) { struct saa7164_port *port = buf->port; u32 i; u8 cc, a; u16 pid; u8 __iomem *bufcpu = (u8 *)buf->cpu; port->sync_errors = 0; port->v_cc_errors = 0; 
port->a_cc_errors = 0; for (i = 0; i < buf->actual_size; i += 188) { if (*(bufcpu + i) != 0x47) port->sync_errors++; /* TODO: Query pid lower 8 bits, ignoring upper bits intensionally */ pid = ((*(bufcpu + i + 1) & 0x1f) << 8) | *(bufcpu + i + 2); cc = *(bufcpu + i + 3) & 0x0f; if (pid == FIXED_VIDEO_PID) { a = ((port->last_v_cc + 1) & 0x0f); if (a != cc) { printk(KERN_ERR "video cc last = %x current = %x i = %d\n", port->last_v_cc, cc, i); port->v_cc_errors++; } port->last_v_cc = cc; } else if (pid == FIXED_AUDIO_PID) { a = ((port->last_a_cc + 1) & 0x0f); if (a != cc) { printk(KERN_ERR "audio cc last = %x current = %x i = %d\n", port->last_a_cc, cc, i); port->a_cc_errors++; } port->last_a_cc = cc; } } /* Only report errors if we've been through this function atleast * once already and the cached cc values are primed. First time through * always generates errors. */ if (port->v_cc_errors && (port->done_first_interrupt > 1)) printk(KERN_ERR "video pid cc, %d errors\n", port->v_cc_errors); if (port->a_cc_errors && (port->done_first_interrupt > 1)) printk(KERN_ERR "audio pid cc, %d errors\n", port->a_cc_errors); if (port->sync_errors && (port->done_first_interrupt > 1)) printk(KERN_ERR "sync_errors = %d\n", port->sync_errors); if (port->done_first_interrupt == 1) port->done_first_interrupt++; } static void saa7164_histogram_reset(struct saa7164_histogram *hg, char *name) { int i; memset(hg, 0, sizeof(struct saa7164_histogram)); strcpy(hg->name, name); /* First 30ms x 1ms */ for (i = 0; i < 30; i++) hg->counter1[0 + i].val = i; /* 30 - 200ms x 10ms */ for (i = 0; i < 18; i++) hg->counter1[30 + i].val = 30 + (i * 10); /* 200 - 2000ms x 100ms */ for (i = 0; i < 15; i++) hg->counter1[48 + i].val = 200 + (i * 200); /* Catch all massive value (2secs) */ hg->counter1[55].val = 2000; /* Catch all massive value (4secs) */ hg->counter1[56].val = 4000; /* Catch all massive value (8secs) */ hg->counter1[57].val = 8000; /* Catch all massive value (15secs) */ hg->counter1[58].val = 
15000; /* Catch all massive value (30secs) */ hg->counter1[59].val = 30000; /* Catch all massive value (60secs) */ hg->counter1[60].val = 60000; /* Catch all massive value (5mins) */ hg->counter1[61].val = 300000; /* Catch all massive value (15mins) */ hg->counter1[62].val = 900000; /* Catch all massive values (1hr) */ hg->counter1[63].val = 3600000; } void saa7164_histogram_update(struct saa7164_histogram *hg, u32 val) { int i; for (i = 0; i < 64; i++) { if (val <= hg->counter1[i].val) { hg->counter1[i].count++; hg->counter1[i].update_time = jiffies; break; } } } static void saa7164_histogram_print(struct saa7164_port *port, struct saa7164_histogram *hg) { u32 entries = 0; int i; printk(KERN_ERR "Histogram named %s (ms, count, last_update_jiffy)\n", hg->name); for (i = 0; i < 64; i++) { if (hg->counter1[i].count == 0) continue; printk(KERN_ERR " %4d %12d %Ld\n", hg->counter1[i].val, hg->counter1[i].count, hg->counter1[i].update_time); entries++; } printk(KERN_ERR "Total: %d\n", entries); } static void saa7164_work_enchandler_helper(struct saa7164_port *port, int bufnr) { struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf = NULL; struct saa7164_user_buffer *ubuf = NULL; struct list_head *c, *n; int i = 0; u8 __iomem *p; mutex_lock(&port->dmaqueue_lock); list_for_each_safe(c, n, &port->dmaqueue.list) { buf = list_entry(c, struct saa7164_buffer, list); if (i++ > port->hwcfg.buffercount) { printk(KERN_ERR "%s() illegal i count %d\n", __func__, i); break; } if (buf->idx == bufnr) { /* Found the buffer, deal with it */ dprintk(DBGLVL_IRQ, "%s() bufnr: %d\n", __func__, bufnr); if (crc_checking) { /* Throw a new checksum on the dma buffer */ buf->crc = crc32(0, buf->cpu, buf->actual_size); } if (guard_checking) { p = (u8 *)buf->cpu; if ((*(p + buf->actual_size + 0) != 0xff) || (*(p + buf->actual_size + 1) != 0xff) || (*(p + buf->actual_size + 2) != 0xff) || (*(p + buf->actual_size + 3) != 0xff) || (*(p + buf->actual_size + 0x10) != 0xff) || (*(p + 
buf->actual_size + 0x11) != 0xff) || (*(p + buf->actual_size + 0x12) != 0xff) || (*(p + buf->actual_size + 0x13) != 0xff)) { printk(KERN_ERR "%s() buf %p guard buffer breach\n", __func__, buf); #if 0 saa7164_dumphex16FF(dev, (p + buf->actual_size) - 32 , 64); #endif } } if ((port->nr != SAA7164_PORT_VBI1) && (port->nr != SAA7164_PORT_VBI2)) { /* Validate the incoming buffer content */ if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_TS) saa7164_ts_verifier(buf); else if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS) saa7164_pack_verifier(buf); } /* find a free user buffer and clone to it */ if (!list_empty(&port->list_buf_free.list)) { /* Pull the first buffer from the used list */ ubuf = list_first_entry(&port->list_buf_free.list, struct saa7164_user_buffer, list); if (buf->actual_size <= ubuf->actual_size) { memcpy_fromio(ubuf->data, buf->cpu, ubuf->actual_size); if (crc_checking) { /* Throw a new checksum on the read buffer */ ubuf->crc = crc32(0, ubuf->data, ubuf->actual_size); } /* Requeue the buffer on the free list */ ubuf->pos = 0; list_move_tail(&ubuf->list, &port->list_buf_used.list); /* Flag any userland waiters */ wake_up_interruptible(&port->wait_read); } else { printk(KERN_ERR "buf %p bufsize fails match\n", buf); } } else printk(KERN_ERR "encirq no free buffers, increase param encoder_buffers\n"); /* Ensure offset into buffer remains 0, fill buffer * with known bad data. We check for this data at a later point * in time. 
*/ saa7164_buffer_zero_offsets(port, bufnr); memset_io(buf->cpu, 0xff, buf->pci_size); if (crc_checking) { /* Throw yet aanother new checksum on the dma buffer */ buf->crc = crc32(0, buf->cpu, buf->actual_size); } break; } } mutex_unlock(&port->dmaqueue_lock); } static void saa7164_work_enchandler(struct work_struct *w) { struct saa7164_port *port = container_of(w, struct saa7164_port, workenc); struct saa7164_dev *dev = port->dev; u32 wp, mcb, rp, cnt = 0; port->last_svc_msecs_diff = port->last_svc_msecs; port->last_svc_msecs = jiffies_to_msecs(jiffies); port->last_svc_msecs_diff = port->last_svc_msecs - port->last_svc_msecs_diff; saa7164_histogram_update(&port->svc_interval, port->last_svc_msecs_diff); port->last_irq_svc_msecs_diff = port->last_svc_msecs - port->last_irq_msecs; saa7164_histogram_update(&port->irq_svc_interval, port->last_irq_svc_msecs_diff); dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed irq->deferred %Ldms wp: %d rp: %d\n", __func__, port->last_svc_msecs_diff, port->last_irq_svc_msecs_diff, port->last_svc_wp, port->last_svc_rp ); /* Current write position */ wp = saa7164_readl(port->bufcounter); if (wp > (port->hwcfg.buffercount - 1)) { printk(KERN_ERR "%s() illegal buf count %d\n", __func__, wp); return; } /* Most current complete buffer */ if (wp == 0) mcb = (port->hwcfg.buffercount - 1); else mcb = wp - 1; while (1) { if (port->done_first_interrupt == 0) { port->done_first_interrupt++; rp = mcb; } else rp = (port->last_svc_rp + 1) % 8; if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) { printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp); break; } saa7164_work_enchandler_helper(port, rp); port->last_svc_rp = rp; cnt++; if (rp == mcb) break; } /* TODO: Convert this into a /proc/saa7164 style readable file */ if (print_histogram == port->nr) { saa7164_histogram_print(port, &port->irq_interval); saa7164_histogram_print(port, &port->svc_interval); saa7164_histogram_print(port, &port->irq_svc_interval); saa7164_histogram_print(port, 
&port->read_interval); saa7164_histogram_print(port, &port->poll_interval); /* TODO: fix this to preserve any previous state */ print_histogram = 64 + port->nr; } } static void saa7164_work_vbihandler(struct work_struct *w) { struct saa7164_port *port = container_of(w, struct saa7164_port, workenc); struct saa7164_dev *dev = port->dev; u32 wp, mcb, rp, cnt = 0; port->last_svc_msecs_diff = port->last_svc_msecs; port->last_svc_msecs = jiffies_to_msecs(jiffies); port->last_svc_msecs_diff = port->last_svc_msecs - port->last_svc_msecs_diff; saa7164_histogram_update(&port->svc_interval, port->last_svc_msecs_diff); port->last_irq_svc_msecs_diff = port->last_svc_msecs - port->last_irq_msecs; saa7164_histogram_update(&port->irq_svc_interval, port->last_irq_svc_msecs_diff); dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed irq->deferred %Ldms wp: %d rp: %d\n", __func__, port->last_svc_msecs_diff, port->last_irq_svc_msecs_diff, port->last_svc_wp, port->last_svc_rp ); /* Current write position */ wp = saa7164_readl(port->bufcounter); if (wp > (port->hwcfg.buffercount - 1)) { printk(KERN_ERR "%s() illegal buf count %d\n", __func__, wp); return; } /* Most current complete buffer */ if (wp == 0) mcb = (port->hwcfg.buffercount - 1); else mcb = wp - 1; while (1) { if (port->done_first_interrupt == 0) { port->done_first_interrupt++; rp = mcb; } else rp = (port->last_svc_rp + 1) % 8; if ((rp < 0) || (rp > (port->hwcfg.buffercount - 1))) { printk(KERN_ERR "%s() illegal rp count %d\n", __func__, rp); break; } saa7164_work_enchandler_helper(port, rp); port->last_svc_rp = rp; cnt++; if (rp == mcb) break; } /* TODO: Convert this into a /proc/saa7164 style readable file */ if (print_histogram == port->nr) { saa7164_histogram_print(port, &port->irq_interval); saa7164_histogram_print(port, &port->svc_interval); saa7164_histogram_print(port, &port->irq_svc_interval); saa7164_histogram_print(port, &port->read_interval); saa7164_histogram_print(port, &port->poll_interval); /* TODO: fix this to preserve 
any previous state */ print_histogram = 64 + port->nr; } } static void saa7164_work_cmdhandler(struct work_struct *w) { struct saa7164_dev *dev = container_of(w, struct saa7164_dev, workcmd); /* Wake up any complete commands */ saa7164_irq_dequeue(dev); } static void saa7164_buffer_deliver(struct saa7164_buffer *buf) { struct saa7164_port *port = buf->port; /* Feed the transport payload into the kernel demux */ dvb_dmx_swfilter_packets(&port->dvb.demux, (u8 *)buf->cpu, SAA7164_TS_NUMBER_OF_LINES); } static irqreturn_t saa7164_irq_vbi(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; /* Store old time */ port->last_irq_msecs_diff = port->last_irq_msecs; /* Collect new stats */ port->last_irq_msecs = jiffies_to_msecs(jiffies); /* Calculate stats */ port->last_irq_msecs_diff = port->last_irq_msecs - port->last_irq_msecs_diff; saa7164_histogram_update(&port->irq_interval, port->last_irq_msecs_diff); dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed\n", __func__, port->last_irq_msecs_diff); /* Tis calls the vbi irq handler */ schedule_work(&port->workenc); return 0; } static irqreturn_t saa7164_irq_encoder(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; /* Store old time */ port->last_irq_msecs_diff = port->last_irq_msecs; /* Collect new stats */ port->last_irq_msecs = jiffies_to_msecs(jiffies); /* Calculate stats */ port->last_irq_msecs_diff = port->last_irq_msecs - port->last_irq_msecs_diff; saa7164_histogram_update(&port->irq_interval, port->last_irq_msecs_diff); dprintk(DBGLVL_IRQ, "%s() %Ldms elapsed\n", __func__, port->last_irq_msecs_diff); schedule_work(&port->workenc); return 0; } static irqreturn_t saa7164_irq_ts(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_buffer *buf; struct list_head *c, *n; int wp, i = 0, rp; /* Find the current write point from the hardware */ wp = saa7164_readl(port->bufcounter); if (wp > (port->hwcfg.buffercount - 1)) BUG(); /* Find the previous buffer to the current write point 
*/ if (wp == 0) rp = (port->hwcfg.buffercount - 1); else rp = wp - 1; /* Lookup the WP in the buffer list */ /* TODO: turn this into a worker thread */ list_for_each_safe(c, n, &port->dmaqueue.list) { buf = list_entry(c, struct saa7164_buffer, list); if (i++ > port->hwcfg.buffercount) BUG(); if (buf->idx == rp) { /* Found the buffer, deal with it */ dprintk(DBGLVL_IRQ, "%s() wp: %d processing: %d\n", __func__, wp, rp); saa7164_buffer_deliver(buf); break; } } return 0; } /* Primary IRQ handler and dispatch mechanism */ static irqreturn_t saa7164_irq(int irq, void *dev_id) { struct saa7164_dev *dev = dev_id; struct saa7164_port *porta = &dev->ports[SAA7164_PORT_TS1]; struct saa7164_port *portb = &dev->ports[SAA7164_PORT_TS2]; struct saa7164_port *portc = &dev->ports[SAA7164_PORT_ENC1]; struct saa7164_port *portd = &dev->ports[SAA7164_PORT_ENC2]; struct saa7164_port *porte = &dev->ports[SAA7164_PORT_VBI1]; struct saa7164_port *portf = &dev->ports[SAA7164_PORT_VBI2]; u32 intid, intstat[INT_SIZE/4]; int i, handled = 0, bit; if (dev == NULL) { printk(KERN_ERR "%s() No device specified\n", __func__); handled = 0; goto out; } /* Check that the hardware is accessible. If the status bytes are * 0xFF then the device is not accessible, the the IRQ belongs * to another driver. * 4 x u32 interrupt registers. */ for (i = 0; i < INT_SIZE/4; i++) { /* TODO: Convert into saa7164_readl() */ /* Read the 4 hardware interrupt registers */ intstat[i] = saa7164_readl(dev->int_status + (i * 4)); if (intstat[i]) handled = 1; } if (handled == 0) goto out; /* For each of the HW interrupt registers */ for (i = 0; i < INT_SIZE/4; i++) { if (intstat[i]) { /* Each function of the board has it's own interruptid. * Find the function that triggered then call * it's handler. 
*/ for (bit = 0; bit < 32; bit++) { if (((intstat[i] >> bit) & 0x00000001) == 0) continue; /* Calculate the interrupt id (0x00 to 0x7f) */ intid = (i * 32) + bit; if (intid == dev->intfdesc.bInterruptId) { /* A response to an cmd/api call */ schedule_work(&dev->workcmd); } else if (intid == porta->hwcfg.interruptid) { /* Transport path 1 */ saa7164_irq_ts(porta); } else if (intid == portb->hwcfg.interruptid) { /* Transport path 2 */ saa7164_irq_ts(portb); } else if (intid == portc->hwcfg.interruptid) { /* Encoder path 1 */ saa7164_irq_encoder(portc); } else if (intid == portd->hwcfg.interruptid) { /* Encoder path 2 */ saa7164_irq_encoder(portd); } else if (intid == porte->hwcfg.interruptid) { /* VBI path 1 */ saa7164_irq_vbi(porte); } else if (intid == portf->hwcfg.interruptid) { /* VBI path 2 */ saa7164_irq_vbi(portf); } else { /* Find the function */ dprintk(DBGLVL_IRQ, "%s() unhandled interrupt " "reg 0x%x bit 0x%x " "intid = 0x%x\n", __func__, i, bit, intid); } } /* Ack it */ saa7164_writel(dev->int_ack + (i * 4), intstat[i]); } } out: return IRQ_RETVAL(handled); } void saa7164_getfirmwarestatus(struct saa7164_dev *dev) { struct saa7164_fw_status *s = &dev->fw_status; dev->fw_status.status = saa7164_readl(SAA_DEVICE_SYSINIT_STATUS); dev->fw_status.mode = saa7164_readl(SAA_DEVICE_SYSINIT_MODE); dev->fw_status.spec = saa7164_readl(SAA_DEVICE_SYSINIT_SPEC); dev->fw_status.inst = saa7164_readl(SAA_DEVICE_SYSINIT_INST); dev->fw_status.cpuload = saa7164_readl(SAA_DEVICE_SYSINIT_CPULOAD); dev->fw_status.remainheap = saa7164_readl(SAA_DEVICE_SYSINIT_REMAINHEAP); dprintk(1, "Firmware status:\n"); dprintk(1, " .status = 0x%08x\n", s->status); dprintk(1, " .mode = 0x%08x\n", s->mode); dprintk(1, " .spec = 0x%08x\n", s->spec); dprintk(1, " .inst = 0x%08x\n", s->inst); dprintk(1, " .cpuload = 0x%08x\n", s->cpuload); dprintk(1, " .remainheap = 0x%08x\n", s->remainheap); } u32 saa7164_getcurrentfirmwareversion(struct saa7164_dev *dev) { u32 reg; reg = 
saa7164_readl(SAA_DEVICE_VERSION); dprintk(1, "Device running firmware version %d.%d.%d.%d (0x%x)\n", (reg & 0x0000fc00) >> 10, (reg & 0x000003e0) >> 5, (reg & 0x0000001f), (reg & 0xffff0000) >> 16, reg); return reg; } /* TODO: Debugging func, remove */ void saa7164_dumphex16(struct saa7164_dev *dev, u8 *buf, int len) { int i; printk(KERN_INFO "--------------------> " "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n"); for (i = 0; i < len; i += 16) printk(KERN_INFO " [0x%08x] " "%02x %02x %02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, *(buf+i+0), *(buf+i+1), *(buf+i+2), *(buf+i+3), *(buf+i+4), *(buf+i+5), *(buf+i+6), *(buf+i+7), *(buf+i+8), *(buf+i+9), *(buf+i+10), *(buf+i+11), *(buf+i+12), *(buf+i+13), *(buf+i+14), *(buf+i+15)); } /* TODO: Debugging func, remove */ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr) { int i; dprintk(1, "--------------------> " "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n"); for (i = 0; i < 0x100; i += 16) dprintk(1, "region0[0x%08x] = " "%02x %02x %02x %02x %02x %02x %02x %02x" " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, (u8)saa7164_readb(addr + i + 0), (u8)saa7164_readb(addr + i + 1), (u8)saa7164_readb(addr + i + 2), (u8)saa7164_readb(addr + i + 3), (u8)saa7164_readb(addr + i + 4), (u8)saa7164_readb(addr + i + 5), (u8)saa7164_readb(addr + i + 6), (u8)saa7164_readb(addr + i + 7), (u8)saa7164_readb(addr + i + 8), (u8)saa7164_readb(addr + i + 9), (u8)saa7164_readb(addr + i + 10), (u8)saa7164_readb(addr + i + 11), (u8)saa7164_readb(addr + i + 12), (u8)saa7164_readb(addr + i + 13), (u8)saa7164_readb(addr + i + 14), (u8)saa7164_readb(addr + i + 15) ); } static void saa7164_dump_hwdesc(struct saa7164_dev *dev) { dprintk(1, "@0x%p hwdesc sizeof(struct tmComResHWDescr) = %d bytes\n", &dev->hwdesc, (u32)sizeof(struct tmComResHWDescr)); dprintk(1, " .bLength = 0x%x\n", dev->hwdesc.bLength); dprintk(1, " .bDescriptorType = 0x%x\n", dev->hwdesc.bDescriptorType); dprintk(1, " 
.bDescriptorSubtype = 0x%x\n", dev->hwdesc.bDescriptorSubtype); dprintk(1, " .bcdSpecVersion = 0x%x\n", dev->hwdesc.bcdSpecVersion); dprintk(1, " .dwClockFrequency = 0x%x\n", dev->hwdesc.dwClockFrequency); dprintk(1, " .dwClockUpdateRes = 0x%x\n", dev->hwdesc.dwClockUpdateRes); dprintk(1, " .bCapabilities = 0x%x\n", dev->hwdesc.bCapabilities); dprintk(1, " .dwDeviceRegistersLocation = 0x%x\n", dev->hwdesc.dwDeviceRegistersLocation); dprintk(1, " .dwHostMemoryRegion = 0x%x\n", dev->hwdesc.dwHostMemoryRegion); dprintk(1, " .dwHostMemoryRegionSize = 0x%x\n", dev->hwdesc.dwHostMemoryRegionSize); dprintk(1, " .dwHostHibernatMemRegion = 0x%x\n", dev->hwdesc.dwHostHibernatMemRegion); dprintk(1, " .dwHostHibernatMemRegionSize = 0x%x\n", dev->hwdesc.dwHostHibernatMemRegionSize); } static void saa7164_dump_intfdesc(struct saa7164_dev *dev) { dprintk(1, "@0x%p intfdesc " "sizeof(struct tmComResInterfaceDescr) = %d bytes\n", &dev->intfdesc, (u32)sizeof(struct tmComResInterfaceDescr)); dprintk(1, " .bLength = 0x%x\n", dev->intfdesc.bLength); dprintk(1, " .bDescriptorType = 0x%x\n", dev->intfdesc.bDescriptorType); dprintk(1, " .bDescriptorSubtype = 0x%x\n", dev->intfdesc.bDescriptorSubtype); dprintk(1, " .bFlags = 0x%x\n", dev->intfdesc.bFlags); dprintk(1, " .bInterfaceType = 0x%x\n", dev->intfdesc.bInterfaceType); dprintk(1, " .bInterfaceId = 0x%x\n", dev->intfdesc.bInterfaceId); dprintk(1, " .bBaseInterface = 0x%x\n", dev->intfdesc.bBaseInterface); dprintk(1, " .bInterruptId = 0x%x\n", dev->intfdesc.bInterruptId); dprintk(1, " .bDebugInterruptId = 0x%x\n", dev->intfdesc.bDebugInterruptId); dprintk(1, " .BARLocation = 0x%x\n", dev->intfdesc.BARLocation); } static void saa7164_dump_busdesc(struct saa7164_dev *dev) { dprintk(1, "@0x%p busdesc sizeof(struct tmComResBusDescr) = %d bytes\n", &dev->busdesc, (u32)sizeof(struct tmComResBusDescr)); dprintk(1, " .CommandRing = 0x%016Lx\n", dev->busdesc.CommandRing); dprintk(1, " .ResponseRing = 0x%016Lx\n", dev->busdesc.ResponseRing); 
dprintk(1, " .CommandWrite = 0x%x\n", dev->busdesc.CommandWrite); dprintk(1, " .CommandRead = 0x%x\n", dev->busdesc.CommandRead); dprintk(1, " .ResponseWrite = 0x%x\n", dev->busdesc.ResponseWrite); dprintk(1, " .ResponseRead = 0x%x\n", dev->busdesc.ResponseRead); } /* Much of the hardware configuration and PCI registers are configured * dynamically depending on firmware. We have to cache some initial * structures then use these to locate other important structures * from PCI space. */ static void saa7164_get_descriptors(struct saa7164_dev *dev) { memcpy_fromio(&dev->hwdesc, dev->bmmio, sizeof(struct tmComResHWDescr)); memcpy_fromio(&dev->intfdesc, dev->bmmio + sizeof(struct tmComResHWDescr), sizeof(struct tmComResInterfaceDescr)); memcpy_fromio(&dev->busdesc, dev->bmmio + dev->intfdesc.BARLocation, sizeof(struct tmComResBusDescr)); if (dev->hwdesc.bLength != sizeof(struct tmComResHWDescr)) { printk(KERN_ERR "Structure struct tmComResHWDescr is mangled\n"); printk(KERN_ERR "Need %x got %d\n", dev->hwdesc.bLength, (u32)sizeof(struct tmComResHWDescr)); } else saa7164_dump_hwdesc(dev); if (dev->intfdesc.bLength != sizeof(struct tmComResInterfaceDescr)) { printk(KERN_ERR "struct struct tmComResInterfaceDescr is mangled\n"); printk(KERN_ERR "Need %x got %d\n", dev->intfdesc.bLength, (u32)sizeof(struct tmComResInterfaceDescr)); } else saa7164_dump_intfdesc(dev); saa7164_dump_busdesc(dev); } static int saa7164_pci_quirks(struct saa7164_dev *dev) { return 0; } static int get_resources(struct saa7164_dev *dev) { if (request_mem_region(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0), dev->name)) { if (request_mem_region(pci_resource_start(dev->pci, 2), pci_resource_len(dev->pci, 2), dev->name)) return 0; } printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx or 0x%llx\n", dev->name, (u64)pci_resource_start(dev->pci, 0), (u64)pci_resource_start(dev->pci, 2)); return -EBUSY; } static int saa7164_port_init(struct saa7164_dev *dev, int portnr) { struct saa7164_port 
*port = NULL; if ((portnr < 0) || (portnr >= SAA7164_MAX_PORTS)) BUG(); port = &dev->ports[portnr]; port->dev = dev; port->nr = portnr; if ((portnr == SAA7164_PORT_TS1) || (portnr == SAA7164_PORT_TS2)) port->type = SAA7164_MPEG_DVB; else if ((portnr == SAA7164_PORT_ENC1) || (portnr == SAA7164_PORT_ENC2)) { port->type = SAA7164_MPEG_ENCODER; /* We need a deferred interrupt handler for cmd handling */ INIT_WORK(&port->workenc, saa7164_work_enchandler); } else if ((portnr == SAA7164_PORT_VBI1) || (portnr == SAA7164_PORT_VBI2)) { port->type = SAA7164_MPEG_VBI; /* We need a deferred interrupt handler for cmd handling */ INIT_WORK(&port->workenc, saa7164_work_vbihandler); } else BUG(); /* Init all the critical resources */ mutex_init(&port->dvb.lock); INIT_LIST_HEAD(&port->dmaqueue.list); mutex_init(&port->dmaqueue_lock); INIT_LIST_HEAD(&port->list_buf_used.list); INIT_LIST_HEAD(&port->list_buf_free.list); init_waitqueue_head(&port->wait_read); saa7164_histogram_reset(&port->irq_interval, "irq intervals"); saa7164_histogram_reset(&port->svc_interval, "deferred intervals"); saa7164_histogram_reset(&port->irq_svc_interval, "irq to deferred intervals"); saa7164_histogram_reset(&port->read_interval, "encoder/vbi read() intervals"); saa7164_histogram_reset(&port->poll_interval, "encoder/vbi poll() intervals"); return 0; } static int saa7164_dev_setup(struct saa7164_dev *dev) { int i; mutex_init(&dev->lock); atomic_inc(&dev->refcount); dev->nr = saa7164_devcount++; snprintf(dev->name, sizeof(dev->name), "saa7164[%d]", dev->nr); mutex_lock(&devlist); list_add_tail(&dev->devlist, &saa7164_devlist); mutex_unlock(&devlist); /* board config */ dev->board = UNSET; if (card[dev->nr] < saa7164_bcount) dev->board = card[dev->nr]; for (i = 0; UNSET == dev->board && i < saa7164_idcount; i++) if (dev->pci->subsystem_vendor == saa7164_subids[i].subvendor && dev->pci->subsystem_device == saa7164_subids[i].subdevice) dev->board = saa7164_subids[i].card; if (UNSET == dev->board) { dev->board 
= SAA7164_BOARD_UNKNOWN; saa7164_card_list(dev); } dev->pci_bus = dev->pci->bus->number; dev->pci_slot = PCI_SLOT(dev->pci->devfn); /* I2C Defaults / setup */ dev->i2c_bus[0].dev = dev; dev->i2c_bus[0].nr = 0; dev->i2c_bus[1].dev = dev; dev->i2c_bus[1].nr = 1; dev->i2c_bus[2].dev = dev; dev->i2c_bus[2].nr = 2; /* Transport + Encoder ports 1, 2, 3, 4 - Defaults / setup */ saa7164_port_init(dev, SAA7164_PORT_TS1); saa7164_port_init(dev, SAA7164_PORT_TS2); saa7164_port_init(dev, SAA7164_PORT_ENC1); saa7164_port_init(dev, SAA7164_PORT_ENC2); saa7164_port_init(dev, SAA7164_PORT_VBI1); saa7164_port_init(dev, SAA7164_PORT_VBI2); if (get_resources(dev) < 0) { printk(KERN_ERR "CORE %s No more PCIe resources for " "subsystem: %04x:%04x\n", dev->name, dev->pci->subsystem_vendor, dev->pci->subsystem_device); saa7164_devcount--; return -ENODEV; } /* PCI/e allocations */ dev->lmmio = ioremap(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0)); dev->lmmio2 = ioremap(pci_resource_start(dev->pci, 2), pci_resource_len(dev->pci, 2)); dev->bmmio = (u8 __iomem *)dev->lmmio; dev->bmmio2 = (u8 __iomem *)dev->lmmio2; /* Inerrupt and ack register locations offset of bmmio */ dev->int_status = 0x183000 + 0xf80; dev->int_ack = 0x183000 + 0xf90; printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n", dev->name, dev->pci->subsystem_vendor, dev->pci->subsystem_device, saa7164_boards[dev->board].name, dev->board, card[dev->nr] == dev->board ? 
"insmod option" : "autodetected"); saa7164_pci_quirks(dev); return 0; } static void saa7164_dev_unregister(struct saa7164_dev *dev) { dprintk(1, "%s()\n", __func__); release_mem_region(pci_resource_start(dev->pci, 0), pci_resource_len(dev->pci, 0)); release_mem_region(pci_resource_start(dev->pci, 2), pci_resource_len(dev->pci, 2)); if (!atomic_dec_and_test(&dev->refcount)) return; iounmap(dev->lmmio); iounmap(dev->lmmio2); return; } #ifdef CONFIG_PROC_FS static int saa7164_proc_show(struct seq_file *m, void *v) { struct saa7164_dev *dev; struct tmComResBusInfo *b; struct list_head *list; int i, c; if (saa7164_devcount == 0) return 0; list_for_each(list, &saa7164_devlist) { dev = list_entry(list, struct saa7164_dev, devlist); seq_printf(m, "%s = %p\n", dev->name, dev); /* Lock the bus from any other access */ b = &dev->bus; mutex_lock(&b->lock); seq_printf(m, " .m_pdwSetWritePos = 0x%x (0x%08x)\n", b->m_dwSetReadPos, saa7164_readl(b->m_dwSetReadPos)); seq_printf(m, " .m_pdwSetReadPos = 0x%x (0x%08x)\n", b->m_dwSetWritePos, saa7164_readl(b->m_dwSetWritePos)); seq_printf(m, " .m_pdwGetWritePos = 0x%x (0x%08x)\n", b->m_dwGetReadPos, saa7164_readl(b->m_dwGetReadPos)); seq_printf(m, " .m_pdwGetReadPos = 0x%x (0x%08x)\n", b->m_dwGetWritePos, saa7164_readl(b->m_dwGetWritePos)); c = 0; seq_printf(m, "\n Set Ring:\n"); seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n"); for (i = 0; i < b->m_dwSizeSetRing; i++) { if (c == 0) seq_printf(m, " %04x:", i); seq_printf(m, " %02x", *(b->m_pdwSetRing + i)); if (++c == 16) { seq_printf(m, "\n"); c = 0; } } c = 0; seq_printf(m, "\n Get Ring:\n"); seq_printf(m, "\n addr 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n"); for (i = 0; i < b->m_dwSizeGetRing; i++) { if (c == 0) seq_printf(m, " %04x:", i); seq_printf(m, " %02x", *(b->m_pdwGetRing + i)); if (++c == 16) { seq_printf(m, "\n"); c = 0; } } mutex_unlock(&b->lock); } return 0; } static int saa7164_proc_open(struct inode *inode, struct file *filp) { return 
single_open(filp, saa7164_proc_show, NULL); } static const struct file_operations saa7164_proc_fops = { .open = saa7164_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int saa7164_proc_create(void) { struct proc_dir_entry *pe; pe = proc_create("saa7164", S_IRUGO, NULL, &saa7164_proc_fops); if (!pe) return -ENOMEM; return 0; } #endif static int saa7164_thread_function(void *data) { struct saa7164_dev *dev = data; struct tmFwInfoStruct fwinfo; u64 last_poll_time = 0; dprintk(DBGLVL_THR, "thread started\n"); set_freezable(); while (1) { msleep_interruptible(100); if (kthread_should_stop()) break; try_to_freeze(); dprintk(DBGLVL_THR, "thread running\n"); /* Dump the firmware debug message to console */ /* Polling this costs us 1-2% of the arm CPU */ /* convert this into a respnde to interrupt 0x7a */ saa7164_api_collect_debug(dev); /* Monitor CPU load every 1 second */ if ((last_poll_time + 1000 /* ms */) < jiffies_to_msecs(jiffies)) { saa7164_api_get_load_info(dev, &fwinfo); last_poll_time = jiffies_to_msecs(jiffies); } } dprintk(DBGLVL_THR, "thread exiting\n"); return 0; } static int __devinit saa7164_initdev(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) { struct saa7164_dev *dev; int err, i; u32 version; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (NULL == dev) return -ENOMEM; /* pci init */ dev->pci = pci_dev; if (pci_enable_device(pci_dev)) { err = -EIO; goto fail_free; } if (saa7164_dev_setup(dev) < 0) { err = -EINVAL; goto fail_free; } /* print pci info */ dev->pci_rev = pci_dev->revision; pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat); printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, " "latency: %d, mmio: 0x%llx\n", dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, dev->pci_lat, (unsigned long long)pci_resource_start(pci_dev, 0)); pci_set_master(pci_dev); /* TODO */ if (!pci_dma_supported(pci_dev, 0xffffffff)) { printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); err = 
-EIO; goto fail_irq; } err = request_irq(pci_dev->irq, saa7164_irq, IRQF_SHARED | IRQF_DISABLED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name, pci_dev->irq); err = -EIO; goto fail_irq; } pci_set_drvdata(pci_dev, dev); /* Init the internal command list */ for (i = 0; i < SAA_CMD_MAX_MSG_UNITS; i++) { dev->cmds[i].seqno = i; dev->cmds[i].inuse = 0; mutex_init(&dev->cmds[i].lock); init_waitqueue_head(&dev->cmds[i].wait); } /* We need a deferred interrupt handler for cmd handling */ INIT_WORK(&dev->workcmd, saa7164_work_cmdhandler); /* Only load the firmware if we know the board */ if (dev->board != SAA7164_BOARD_UNKNOWN) { err = saa7164_downloadfirmware(dev); if (err < 0) { printk(KERN_ERR "Failed to boot firmware, no features " "registered\n"); goto fail_fw; } saa7164_get_descriptors(dev); saa7164_dumpregs(dev, 0); saa7164_getcurrentfirmwareversion(dev); saa7164_getfirmwarestatus(dev); err = saa7164_bus_setup(dev); if (err < 0) printk(KERN_ERR "Failed to setup the bus, will continue\n"); saa7164_bus_dump(dev); /* Ping the running firmware via the command bus and get the * firmware version, this checks the bus is running OK. */ version = 0; if (saa7164_api_get_fw_version(dev, &version) == SAA_OK) dprintk(1, "Bus is operating correctly using " "version %d.%d.%d.%d (0x%x)\n", (version & 0x0000fc00) >> 10, (version & 0x000003e0) >> 5, (version & 0x0000001f), (version & 0xffff0000) >> 16, version); else printk(KERN_ERR "Failed to communicate with the firmware\n"); /* Bring up the I2C buses */ saa7164_i2c_register(&dev->i2c_bus[0]); saa7164_i2c_register(&dev->i2c_bus[1]); saa7164_i2c_register(&dev->i2c_bus[2]); saa7164_gpio_setup(dev); saa7164_card_setup(dev); /* Parse the dynamic device configuration, find various * media endpoints (MPEG, WMV, PS, TS) and cache their * configuration details into the driver, so we can * reference them later during simething_register() func, * interrupt handlers, deferred work handlers etc. 
*/ saa7164_api_enum_subdevs(dev); /* Begin to create the video sub-systems and register funcs */ if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB) { if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS1]) < 0) { printk(KERN_ERR "%s() Failed to register " "dvb adapters on porta\n", __func__); } } if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB) { if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS2]) < 0) { printk(KERN_ERR"%s() Failed to register " "dvb adapters on portb\n", __func__); } } if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER) { if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC1]) < 0) { printk(KERN_ERR"%s() Failed to register " "mpeg encoder\n", __func__); } } if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER) { if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC2]) < 0) { printk(KERN_ERR"%s() Failed to register " "mpeg encoder\n", __func__); } } if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI) { if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI1]) < 0) { printk(KERN_ERR"%s() Failed to register " "vbi device\n", __func__); } } if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI) { if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI2]) < 0) { printk(KERN_ERR"%s() Failed to register " "vbi device\n", __func__); } } saa7164_api_set_debug(dev, fw_debug); if (fw_debug) { dev->kthread = kthread_run(saa7164_thread_function, dev, "saa7164 debug"); if (!dev->kthread) printk(KERN_ERR "%s() Failed to create " "debug kernel thread\n", __func__); } } /* != BOARD_UNKNOWN */ else printk(KERN_ERR "%s() Unsupported board detected, " "registering without firmware\n", __func__); dprintk(1, "%s() parameter debug = %d\n", __func__, saa_debug); dprintk(1, "%s() parameter waitsecs = %d\n", __func__, waitsecs); fail_fw: return 0; fail_irq: saa7164_dev_unregister(dev); fail_free: kfree(dev); return err; } static void saa7164_shutdown(struct saa7164_dev *dev) { dprintk(1, "%s()\n", __func__); } static 
void __devexit saa7164_finidev(struct pci_dev *pci_dev) { struct saa7164_dev *dev = pci_get_drvdata(pci_dev); if (dev->board != SAA7164_BOARD_UNKNOWN) { if (fw_debug && dev->kthread) { kthread_stop(dev->kthread); dev->kthread = NULL; } if (dev->firmwareloaded) saa7164_api_set_debug(dev, 0x00); } saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1], &dev->ports[SAA7164_PORT_ENC1].irq_interval); saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1], &dev->ports[SAA7164_PORT_ENC1].svc_interval); saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1], &dev->ports[SAA7164_PORT_ENC1].irq_svc_interval); saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1], &dev->ports[SAA7164_PORT_ENC1].read_interval); saa7164_histogram_print(&dev->ports[SAA7164_PORT_ENC1], &dev->ports[SAA7164_PORT_ENC1].poll_interval); saa7164_histogram_print(&dev->ports[SAA7164_PORT_VBI1], &dev->ports[SAA7164_PORT_VBI1].read_interval); saa7164_histogram_print(&dev->ports[SAA7164_PORT_VBI2], &dev->ports[SAA7164_PORT_VBI2].poll_interval); saa7164_shutdown(dev); if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB) saa7164_dvb_unregister(&dev->ports[SAA7164_PORT_TS1]); if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB) saa7164_dvb_unregister(&dev->ports[SAA7164_PORT_TS2]); if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER) saa7164_encoder_unregister(&dev->ports[SAA7164_PORT_ENC1]); if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER) saa7164_encoder_unregister(&dev->ports[SAA7164_PORT_ENC2]); if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI) saa7164_vbi_unregister(&dev->ports[SAA7164_PORT_VBI1]); if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI) saa7164_vbi_unregister(&dev->ports[SAA7164_PORT_VBI2]); saa7164_i2c_unregister(&dev->i2c_bus[0]); saa7164_i2c_unregister(&dev->i2c_bus[1]); saa7164_i2c_unregister(&dev->i2c_bus[2]); pci_disable_device(pci_dev); /* unregister stuff */ free_irq(pci_dev->irq, dev); pci_set_drvdata(pci_dev, NULL); 
mutex_lock(&devlist); list_del(&dev->devlist); mutex_unlock(&devlist); saa7164_dev_unregister(dev); kfree(dev); } static struct pci_device_id saa7164_pci_tbl[] = { { /* SAA7164 */ .vendor = 0x1131, .device = 0x7164, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { /* --- end of list --- */ } }; MODULE_DEVICE_TABLE(pci, saa7164_pci_tbl); static struct pci_driver saa7164_pci_driver = { .name = "saa7164", .id_table = saa7164_pci_tbl, .probe = saa7164_initdev, .remove = __devexit_p(saa7164_finidev), /* TODO */ .suspend = NULL, .resume = NULL, }; static int __init saa7164_init(void) { printk(KERN_INFO "saa7164 driver loaded\n"); #ifdef CONFIG_PROC_FS saa7164_proc_create(); #endif return pci_register_driver(&saa7164_pci_driver); } static void __exit saa7164_fini(void) { #ifdef CONFIG_PROC_FS remove_proc_entry("saa7164", NULL); #endif pci_unregister_driver(&saa7164_pci_driver); } module_init(saa7164_init); module_exit(saa7164_fini);
gpl-2.0
mightyme/linux-3.0.39-m04x
drivers/gpu/drm/r128/r128_state.c
8414
41298
/* r128_state.c -- State support for r128 -*- linux-c -*- * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com */ /* * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Gareth Hughes <gareth@valinux.com> */ #include "drmP.h" #include "drm.h" #include "r128_drm.h" #include "r128_drv.h" /* ================================================================ * CCE hardware state programming functions */ static void r128_emit_clip_rects(drm_r128_private_t *dev_priv, struct drm_clip_rect *boxes, int count) { u32 aux_sc_cntl = 0x00000000; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING((count < 3 ? 
count : 3) * 5 + 2); if (count >= 1) { OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3)); OUT_RING(boxes[0].x1); OUT_RING(boxes[0].x2 - 1); OUT_RING(boxes[0].y1); OUT_RING(boxes[0].y2 - 1); aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR); } if (count >= 2) { OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3)); OUT_RING(boxes[1].x1); OUT_RING(boxes[1].x2 - 1); OUT_RING(boxes[1].y1); OUT_RING(boxes[1].y2 - 1); aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR); } if (count >= 3) { OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3)); OUT_RING(boxes[2].x1); OUT_RING(boxes[2].x2 - 1); OUT_RING(boxes[2].y1); OUT_RING(boxes[2].y2 - 1); aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR); } OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0)); OUT_RING(aux_sc_cntl); ADVANCE_RING(); } static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0)); OUT_RING(ctx->scale_3d_cntl); ADVANCE_RING(); } static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(13); OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11)); OUT_RING(ctx->dst_pitch_offset_c); OUT_RING(ctx->dp_gui_master_cntl_c); OUT_RING(ctx->sc_top_left_c); OUT_RING(ctx->sc_bottom_right_c); OUT_RING(ctx->z_offset_c); OUT_RING(ctx->z_pitch_c); OUT_RING(ctx->z_sten_cntl_c); OUT_RING(ctx->tex_cntl_c); OUT_RING(ctx->misc_3d_state_cntl_reg); OUT_RING(ctx->texture_clr_cmp_clr_c); OUT_RING(ctx->texture_clr_cmp_msk_c); OUT_RING(ctx->fog_color_c); ADVANCE_RING(); } static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG("\n"); 
BEGIN_RING(3); OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP)); OUT_RING(ctx->setup_cntl); OUT_RING(ctx->pm4_vc_fpu_setup); ADVANCE_RING(); } static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(5); OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0)); OUT_RING(ctx->dp_write_mask); OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1)); OUT_RING(ctx->sten_ref_mask_c); OUT_RING(ctx->plane_3d_mask_c); ADVANCE_RING(); } static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0)); OUT_RING(ctx->window_xy_offset); ADVANCE_RING(); } static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0]; int i; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS); OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C, 2 + R128_MAX_TEXTURE_LEVELS)); OUT_RING(tex->tex_cntl); OUT_RING(tex->tex_combine_cntl); OUT_RING(ctx->tex_size_pitch_c); for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) OUT_RING(tex->tex_offset[i]); OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1)); OUT_RING(ctx->constant_color_c); OUT_RING(tex->tex_border_color); ADVANCE_RING(); } static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1]; int i; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS); OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS)); OUT_RING(tex->tex_cntl); 
OUT_RING(tex->tex_combine_cntl); for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) OUT_RING(tex->tex_offset[i]); OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0)); OUT_RING(tex->tex_border_color); ADVANCE_RING(); } static void r128_emit_state(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; DRM_DEBUG("dirty=0x%08x\n", dirty); if (dirty & R128_UPLOAD_CORE) { r128_emit_core(dev_priv); sarea_priv->dirty &= ~R128_UPLOAD_CORE; } if (dirty & R128_UPLOAD_CONTEXT) { r128_emit_context(dev_priv); sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT; } if (dirty & R128_UPLOAD_SETUP) { r128_emit_setup(dev_priv); sarea_priv->dirty &= ~R128_UPLOAD_SETUP; } if (dirty & R128_UPLOAD_MASKS) { r128_emit_masks(dev_priv); sarea_priv->dirty &= ~R128_UPLOAD_MASKS; } if (dirty & R128_UPLOAD_WINDOW) { r128_emit_window(dev_priv); sarea_priv->dirty &= ~R128_UPLOAD_WINDOW; } if (dirty & R128_UPLOAD_TEX0) { r128_emit_tex0(dev_priv); sarea_priv->dirty &= ~R128_UPLOAD_TEX0; } if (dirty & R128_UPLOAD_TEX1) { r128_emit_tex1(dev_priv); sarea_priv->dirty &= ~R128_UPLOAD_TEX1; } /* Turn off the texture cache flushing */ sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH; sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE; } #if R128_PERFORMANCE_BOXES /* ================================================================ * Performance monitoring functions */ static void r128_clear_box(drm_r128_private_t *dev_priv, int x, int y, int w, int h, int r, int g, int b) { u32 pitch, offset; u32 fb_bpp, color; RING_LOCALS; switch (dev_priv->fb_bpp) { case 16: fb_bpp = R128_GMC_DST_16BPP; color = (((r & 0xf8) << 8) | ((g & 0xfc) << 3) | ((b & 0xf8) >> 3)); break; case 24: fb_bpp = R128_GMC_DST_24BPP; color = ((r << 16) | (g << 8) | b); break; case 32: fb_bpp = R128_GMC_DST_32BPP; color = (((0xff) << 24) | (r << 16) | (g << 8) | b); break; default: return; } offset = dev_priv->back_offset; pitch = dev_priv->back_pitch >> 3; BEGIN_RING(6); 
OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | fb_bpp | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS); OUT_RING((pitch << 21) | (offset >> 5)); OUT_RING(color); OUT_RING((x << 16) | y); OUT_RING((w << 16) | h); ADVANCE_RING(); } static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv) { if (atomic_read(&dev_priv->idle_count) == 0) r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); else atomic_set(&dev_priv->idle_count, 0); } #endif /* ================================================================ * CCE command dispatch functions */ static void r128_print_dirty(const char *msg, unsigned int flags) { DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n", msg, flags, (flags & R128_UPLOAD_CORE) ? "core, " : "", (flags & R128_UPLOAD_CONTEXT) ? "context, " : "", (flags & R128_UPLOAD_SETUP) ? "setup, " : "", (flags & R128_UPLOAD_TEX0) ? "tex0, " : "", (flags & R128_UPLOAD_TEX1) ? "tex1, " : "", (flags & R128_UPLOAD_MASKS) ? "masks, " : "", (flags & R128_UPLOAD_WINDOW) ? "window, " : "", (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "", (flags & R128_REQUIRE_QUIESCENCE) ? 
"quiescence, " : ""); } static void r128_cce_dispatch_clear(struct drm_device *dev, drm_r128_clear_t *clear) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; struct drm_clip_rect *pbox = sarea_priv->boxes; unsigned int flags = clear->flags; int i; RING_LOCALS; DRM_DEBUG("\n"); if (dev_priv->page_flipping && dev_priv->current_page == 1) { unsigned int tmp = flags; flags &= ~(R128_FRONT | R128_BACK); if (tmp & R128_FRONT) flags |= R128_BACK; if (tmp & R128_BACK) flags |= R128_FRONT; } for (i = 0; i < nbox; i++) { int x = pbox[i].x1; int y = pbox[i].y1; int w = pbox[i].x2 - x; int h = pbox[i].y2 - y; DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n", pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2, flags); if (flags & (R128_FRONT | R128_BACK)) { BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0)); OUT_RING(clear->color_mask); ADVANCE_RING(); } if (flags & R128_FRONT) { BEGIN_RING(6); OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | (dev_priv->color_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS); OUT_RING(dev_priv->front_pitch_offset_c); OUT_RING(clear->clear_color); OUT_RING((x << 16) | y); OUT_RING((w << 16) | h); ADVANCE_RING(); } if (flags & R128_BACK) { BEGIN_RING(6); OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | (dev_priv->color_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS); OUT_RING(dev_priv->back_pitch_offset_c); OUT_RING(clear->clear_color); OUT_RING((x << 16) | y); OUT_RING((w << 16) | h); ADVANCE_RING(); } if (flags & R128_DEPTH) { BEGIN_RING(6); OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | (dev_priv->depth_fmt << 8) | 
R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS); OUT_RING(dev_priv->depth_pitch_offset_c); OUT_RING(clear->clear_depth); OUT_RING((x << 16) | y); OUT_RING((w << 16) | h); ADVANCE_RING(); } } } static void r128_cce_dispatch_swap(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; struct drm_clip_rect *pbox = sarea_priv->boxes; int i; RING_LOCALS; DRM_DEBUG("\n"); #if R128_PERFORMANCE_BOXES /* Do some trivial performance monitoring... */ r128_cce_performance_boxes(dev_priv); #endif for (i = 0; i < nbox; i++) { int x = pbox[i].x1; int y = pbox[i].y1; int w = pbox[i].x2 - x; int h = pbox[i].y2 - y; BEGIN_RING(7); OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_NONE | (dev_priv->color_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_S | R128_DP_SRC_SOURCE_MEMORY | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS); /* Make this work even if front & back are flipped: */ if (dev_priv->current_page == 0) { OUT_RING(dev_priv->back_pitch_offset_c); OUT_RING(dev_priv->front_pitch_offset_c); } else { OUT_RING(dev_priv->front_pitch_offset_c); OUT_RING(dev_priv->back_pitch_offset_c); } OUT_RING((x << 16) | y); OUT_RING((x << 16) | y); OUT_RING((w << 16) | h); ADVANCE_RING(); } /* Increment the frame counter. The client-side 3D driver must * throttle the framerate by waiting for this value before * performing the swapbuffer ioctl. 
*/ dev_priv->sarea_priv->last_frame++; BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0)); OUT_RING(dev_priv->sarea_priv->last_frame); ADVANCE_RING(); } static void r128_cce_dispatch_flip(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; RING_LOCALS; DRM_DEBUG("page=%d pfCurrentPage=%d\n", dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage); #if R128_PERFORMANCE_BOXES /* Do some trivial performance monitoring... */ r128_cce_performance_boxes(dev_priv); #endif BEGIN_RING(4); R128_WAIT_UNTIL_PAGE_FLIPPED(); OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0)); if (dev_priv->current_page == 0) OUT_RING(dev_priv->back_offset); else OUT_RING(dev_priv->front_offset); ADVANCE_RING(); /* Increment the frame counter. The client-side 3D driver must * throttle the framerate by waiting for this value before * performing the swapbuffer ioctl. */ dev_priv->sarea_priv->last_frame++; dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page = 1 - dev_priv->current_page; BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0)); OUT_RING(dev_priv->sarea_priv->last_frame); ADVANCE_RING(); } static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int format = sarea_priv->vc_format; int offset = buf->bus_address; int size = buf->used; int prim = buf_priv->prim; int i = 0; RING_LOCALS; DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox); if (0) r128_print_dirty("dispatch_vertex", sarea_priv->dirty); if (buf->used) { buf_priv->dispatched = 1; if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) r128_emit_state(dev_priv); do { /* Emit the next set of up to three cliprects */ if (i < sarea_priv->nbox) { r128_emit_clip_rects(dev_priv, &sarea_priv->boxes[i], sarea_priv->nbox - i); } /* Emit the vertex buffer rendering commands */ BEGIN_RING(5); 
OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3)); OUT_RING(offset); OUT_RING(size); OUT_RING(format); OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST | (size << R128_CCE_VC_CNTL_NUM_SHIFT)); ADVANCE_RING(); i += 3; } while (i < sarea_priv->nbox); } if (buf_priv->discard) { buf_priv->age = dev_priv->sarea_priv->last_dispatch; /* Emit the vertex buffer age */ BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); OUT_RING(buf_priv->age); ADVANCE_RING(); buf->pending = 1; buf->used = 0; /* FIXME: Check dispatched field */ buf_priv->dispatched = 0; } dev_priv->sarea_priv->last_dispatch++; sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; sarea_priv->nbox = 0; } static void r128_cce_dispatch_indirect(struct drm_device *dev, struct drm_buf *buf, int start, int end) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; RING_LOCALS; DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); if (start != end) { int offset = buf->bus_address + start; int dwords = (end - start + 3) / sizeof(u32); /* Indirect buffer data must be an even number of * dwords, so if we've been given an odd number we must * pad the data with a Type-2 CCE packet. 
*/ if (dwords & 1) { u32 *data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset + start); data[dwords++] = cpu_to_le32(R128_CCE_PACKET2); } buf_priv->dispatched = 1; /* Fire off the indirect buffer */ BEGIN_RING(3); OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1)); OUT_RING(offset); OUT_RING(dwords); ADVANCE_RING(); } if (buf_priv->discard) { buf_priv->age = dev_priv->sarea_priv->last_dispatch; /* Emit the indirect buffer age */ BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); OUT_RING(buf_priv->age); ADVANCE_RING(); buf->pending = 1; buf->used = 0; /* FIXME: Check dispatched field */ buf_priv->dispatched = 0; } dev_priv->sarea_priv->last_dispatch++; } static void r128_cce_dispatch_indices(struct drm_device *dev, struct drm_buf *buf, int start, int end, int count) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int format = sarea_priv->vc_format; int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset; int prim = buf_priv->prim; u32 *data; int dwords; int i = 0; RING_LOCALS; DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count); if (0) r128_print_dirty("dispatch_indices", sarea_priv->dirty); if (start != end) { buf_priv->dispatched = 1; if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) r128_emit_state(dev_priv); dwords = (end - start + 3) / sizeof(u32); data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset + start); data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, dwords - 2)); data[1] = cpu_to_le32(offset); data[2] = cpu_to_le32(R128_MAX_VB_VERTS); data[3] = cpu_to_le32(format); data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND | (count << 16))); if (count & 0x1) { #ifdef __LITTLE_ENDIAN data[dwords - 1] &= 0x0000ffff; #else data[dwords - 1] &= 0xffff0000; #endif } do { /* Emit the next set of up to three cliprects */ if (i < sarea_priv->nbox) { r128_emit_clip_rects(dev_priv, 
&sarea_priv->boxes[i], sarea_priv->nbox - i); } r128_cce_dispatch_indirect(dev, buf, start, end); i += 3; } while (i < sarea_priv->nbox); } if (buf_priv->discard) { buf_priv->age = dev_priv->sarea_priv->last_dispatch; /* Emit the vertex buffer age */ BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0)); OUT_RING(buf_priv->age); ADVANCE_RING(); buf->pending = 1; /* FIXME: Check dispatched field */ buf_priv->dispatched = 0; } dev_priv->sarea_priv->last_dispatch++; sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS; sarea_priv->nbox = 0; } static int r128_cce_dispatch_blit(struct drm_device *dev, struct drm_file *file_priv, drm_r128_blit_t *blit) { drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; u32 *data; int dword_shift, dwords; RING_LOCALS; DRM_DEBUG("\n"); /* The compiler won't optimize away a division by a variable, * even if the only legal values are powers of two. Thus, we'll * use a shift instead. */ switch (blit->format) { case R128_DATATYPE_ARGB8888: dword_shift = 0; break; case R128_DATATYPE_ARGB1555: case R128_DATATYPE_RGB565: case R128_DATATYPE_ARGB4444: case R128_DATATYPE_YVYU422: case R128_DATATYPE_VYUY422: dword_shift = 1; break; case R128_DATATYPE_CI8: case R128_DATATYPE_RGB8: dword_shift = 2; break; default: DRM_ERROR("invalid blit format %d\n", blit->format); return -EINVAL; } /* Flush the pixel cache, and mark the contents as Read Invalid. * This ensures no pixel data gets mixed up with the texture * data from the host data blit, otherwise part of the texture * image may be corrupted. */ BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0)); OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI); ADVANCE_RING(); /* Dispatch the indirect buffer. 
*/ buf = dma->buflist[blit->idx]; buf_priv = buf->dev_private; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", blit->idx); return -EINVAL; } buf_priv->discard = 1; dwords = (blit->width * blit->height) >> dword_shift; data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6)); data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_NONE | (blit->format << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_S | R128_DP_SRC_SOURCE_HOST_DATA | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS)); data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5)); data[3] = cpu_to_le32(0xffffffff); data[4] = cpu_to_le32(0xffffffff); data[5] = cpu_to_le32((blit->y << 16) | blit->x); data[6] = cpu_to_le32((blit->height << 16) | blit->width); data[7] = cpu_to_le32(dwords); buf->used = (dwords + 8) * sizeof(u32); r128_cce_dispatch_indirect(dev, buf, 0, buf->used); /* Flush the pixel cache after the blit completes. This ensures * the texture data is written out to memory before rendering * continues. */ BEGIN_RING(2); OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0)); OUT_RING(R128_PC_FLUSH_GUI); ADVANCE_RING(); return 0; } /* ================================================================ * Tiled depth buffer management * * FIXME: These should all set the destination write mask for when we * have hardware stencil support. 
*/ static int r128_cce_dispatch_write_span(struct drm_device *dev, drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, x, y; u32 *buffer; u8 *mask; int i, buffer_size, mask_size; RING_LOCALS; DRM_DEBUG("\n"); count = depth->n; if (count > 4096 || count <= 0) return -EMSGSIZE; if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) return -EFAULT; if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) return -EFAULT; buffer_size = depth->n * sizeof(u32); buffer = kmalloc(buffer_size, GFP_KERNEL); if (buffer == NULL) return -ENOMEM; if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { kfree(buffer); return -EFAULT; } mask_size = depth->n * sizeof(u8); if (depth->mask) { mask = kmalloc(mask_size, GFP_KERNEL); if (mask == NULL) { kfree(buffer); return -ENOMEM; } if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { kfree(buffer); kfree(mask); return -EFAULT; } for (i = 0; i < count; i++, x++) { if (mask[i]) { BEGIN_RING(6); OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | (dev_priv->depth_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); OUT_RING(dev_priv->depth_pitch_offset_c); OUT_RING(buffer[i]); OUT_RING((x << 16) | y); OUT_RING((1 << 16) | 1); ADVANCE_RING(); } } kfree(mask); } else { for (i = 0; i < count; i++, x++) { BEGIN_RING(6); OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | (dev_priv->depth_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); OUT_RING(dev_priv->depth_pitch_offset_c); OUT_RING(buffer[i]); OUT_RING((x << 16) | y); OUT_RING((1 << 16) | 1); ADVANCE_RING(); } } kfree(buffer); return 0; } static int r128_cce_dispatch_write_pixels(struct drm_device *dev, drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, *x, *y; u32 *buffer; u8 
*mask; int i, xbuf_size, ybuf_size, buffer_size, mask_size; RING_LOCALS; DRM_DEBUG("\n"); count = depth->n; if (count > 4096 || count <= 0) return -EMSGSIZE; xbuf_size = count * sizeof(*x); ybuf_size = count * sizeof(*y); x = kmalloc(xbuf_size, GFP_KERNEL); if (x == NULL) return -ENOMEM; y = kmalloc(ybuf_size, GFP_KERNEL); if (y == NULL) { kfree(x); return -ENOMEM; } if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { kfree(x); kfree(y); return -EFAULT; } if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) { kfree(x); kfree(y); return -EFAULT; } buffer_size = depth->n * sizeof(u32); buffer = kmalloc(buffer_size, GFP_KERNEL); if (buffer == NULL) { kfree(x); kfree(y); return -ENOMEM; } if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) { kfree(x); kfree(y); kfree(buffer); return -EFAULT; } if (depth->mask) { mask_size = depth->n * sizeof(u8); mask = kmalloc(mask_size, GFP_KERNEL); if (mask == NULL) { kfree(x); kfree(y); kfree(buffer); return -ENOMEM; } if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) { kfree(x); kfree(y); kfree(buffer); kfree(mask); return -EFAULT; } for (i = 0; i < count; i++) { if (mask[i]) { BEGIN_RING(6); OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | (dev_priv->depth_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); OUT_RING(dev_priv->depth_pitch_offset_c); OUT_RING(buffer[i]); OUT_RING((x[i] << 16) | y[i]); OUT_RING((1 << 16) | 1); ADVANCE_RING(); } } kfree(mask); } else { for (i = 0; i < count; i++) { BEGIN_RING(6); OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4)); OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_SOLID_COLOR | (dev_priv->depth_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_P | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); OUT_RING(dev_priv->depth_pitch_offset_c); OUT_RING(buffer[i]); OUT_RING((x[i] << 16) | y[i]); OUT_RING((1 << 16) | 1); ADVANCE_RING(); } } kfree(x); 
kfree(y); kfree(buffer); return 0; } static int r128_cce_dispatch_read_span(struct drm_device *dev, drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, x, y; RING_LOCALS; DRM_DEBUG("\n"); count = depth->n; if (count > 4096 || count <= 0) return -EMSGSIZE; if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) return -EFAULT; if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) return -EFAULT; BEGIN_RING(7); OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_NONE | (dev_priv->depth_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_S | R128_DP_SRC_SOURCE_MEMORY | R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS); OUT_RING(dev_priv->depth_pitch_offset_c); OUT_RING(dev_priv->span_pitch_offset_c); OUT_RING((x << 16) | y); OUT_RING((0 << 16) | 0); OUT_RING((count << 16) | 1); ADVANCE_RING(); return 0; } static int r128_cce_dispatch_read_pixels(struct drm_device *dev, drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, *x, *y; int i, xbuf_size, ybuf_size; RING_LOCALS; DRM_DEBUG("\n"); count = depth->n; if (count > 4096 || count <= 0) return -EMSGSIZE; if (count > dev_priv->depth_pitch) count = dev_priv->depth_pitch; xbuf_size = count * sizeof(*x); ybuf_size = count * sizeof(*y); x = kmalloc(xbuf_size, GFP_KERNEL); if (x == NULL) return -ENOMEM; y = kmalloc(ybuf_size, GFP_KERNEL); if (y == NULL) { kfree(x); return -ENOMEM; } if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) { kfree(x); kfree(y); return -EFAULT; } if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) { kfree(x); kfree(y); return -EFAULT; } for (i = 0; i < count; i++) { BEGIN_RING(7); OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5)); OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL | R128_GMC_DST_PITCH_OFFSET_CNTL | R128_GMC_BRUSH_NONE | (dev_priv->depth_fmt << 8) | R128_GMC_SRC_DATATYPE_COLOR | R128_ROP3_S | R128_DP_SRC_SOURCE_MEMORY | R128_GMC_CLR_CMP_CNTL_DIS | 
R128_GMC_WR_MSK_DIS); OUT_RING(dev_priv->depth_pitch_offset_c); OUT_RING(dev_priv->span_pitch_offset_c); OUT_RING((x[i] << 16) | y[i]); OUT_RING((i << 16) | 0); OUT_RING((1 << 16) | 1); ADVANCE_RING(); } kfree(x); kfree(y); return 0; } /* ================================================================ * Polygon stipple */ static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple) { drm_r128_private_t *dev_priv = dev->dev_private; int i; RING_LOCALS; DRM_DEBUG("\n"); BEGIN_RING(33); OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31)); for (i = 0; i < 32; i++) OUT_RING(stipple[i]); ADVANCE_RING(); } /* ================================================================ * IOCTL functions */ static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv; drm_r128_clear_t *clear = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); sarea_priv = dev_priv->sarea_priv; if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; r128_cce_dispatch_clear(dev, clear); COMMIT_RING(); /* Make sure we restore the 3D state next time. 
*/ dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS; return 0; } static int r128_do_init_pageflip(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET); dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL); R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset); R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL); dev_priv->page_flipping = 1; dev_priv->current_page = 0; dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page; return 0; } static int r128_do_cleanup_pageflip(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset); R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl); if (dev_priv->current_page != 0) { r128_cce_dispatch_flip(dev); COMMIT_RING(); } dev_priv->page_flipping = 0; return 0; } /* Swapping and flipping are different operations, need different ioctls. * They can & should be intermixed to support multiple 3d windows. 
*/ static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); if (!dev_priv->page_flipping) r128_do_init_pageflip(dev); r128_cce_dispatch_flip(dev); COMMIT_RING(); return 0; } static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; r128_cce_dispatch_swap(dev); dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS); COMMIT_RING(); return 0; } static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); if (vertex->idx < 0 || vertex->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", vertex->idx, dma->buf_count - 1); return -EINVAL; } if (vertex->prim < 0 || vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { DRM_ERROR("buffer prim %d\n", vertex->prim); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); buf = dma->buflist[vertex->idx]; buf_priv = buf->dev_private; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { DRM_ERROR("sending 
pending buffer %d\n", vertex->idx); return -EINVAL; } buf->used = vertex->count; buf_priv->prim = vertex->prim; buf_priv->discard = vertex->discard; r128_cce_dispatch_vertex(dev, buf); COMMIT_RING(); return 0; } static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indices_t *elts = data; int count; LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, elts->idx, elts->start, elts->end, elts->discard); if (elts->idx < 0 || elts->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", elts->idx, dma->buf_count - 1); return -EINVAL; } if (elts->prim < 0 || elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) { DRM_ERROR("buffer prim %d\n", elts->prim); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); buf = dma->buflist[elts->idx]; buf_priv = buf->dev_private; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", elts->idx); return -EINVAL; } count = (elts->end - elts->start) / sizeof(u16); elts->start -= R128_INDEX_PRIM_OFFSET; if (elts->start & 0x7) { DRM_ERROR("misaligned buffer 0x%x\n", elts->start); return -EINVAL; } if (elts->start < buf->used) { DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used); return -EINVAL; } buf->used = elts->end; buf_priv->prim = elts->prim; buf_priv->discard = elts->discard; r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count); COMMIT_RING(); return 0; } static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_blit_t *blit = 
data; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx); if (blit->idx < 0 || blit->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", blit->idx, dma->buf_count - 1); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); ret = r128_cce_dispatch_blit(dev, file_priv, blit); COMMIT_RING(); return ret; } static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_depth_t *depth = data; int ret; LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); RING_SPACE_TEST_WITH_RETURN(dev_priv); ret = -EINVAL; switch (depth->func) { case R128_WRITE_SPAN: ret = r128_cce_dispatch_write_span(dev, depth); break; case R128_WRITE_PIXELS: ret = r128_cce_dispatch_write_pixels(dev, depth); break; case R128_READ_SPAN: ret = r128_cce_dispatch_read_span(dev, depth); break; case R128_READ_PIXELS: ret = r128_cce_dispatch_read_pixels(dev, depth); break; } COMMIT_RING(); return ret; } static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_stipple_t *stipple = data; u32 mask[32]; LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) return -EFAULT; RING_SPACE_TEST_WITH_RETURN(dev_priv); r128_cce_dispatch_stipple(dev, mask); COMMIT_RING(); return 0; } static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indirect_t *indirect = data; #if 0 RING_LOCALS; #endif LOCK_TEST_WITH_RETURN(dev, file_priv); DEV_INIT_TEST_WITH_RETURN(dev_priv); DRM_DEBUG("idx=%d s=%d e=%d d=%d\n", 
indirect->idx, indirect->start, indirect->end, indirect->discard); if (indirect->idx < 0 || indirect->idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", indirect->idx, dma->buf_count - 1); return -EINVAL; } buf = dma->buflist[indirect->idx]; buf_priv = buf->dev_private; if (buf->file_priv != file_priv) { DRM_ERROR("process %d using buffer owned by %p\n", DRM_CURRENTPID, buf->file_priv); return -EINVAL; } if (buf->pending) { DRM_ERROR("sending pending buffer %d\n", indirect->idx); return -EINVAL; } if (indirect->start < buf->used) { DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n", indirect->start, buf->used); return -EINVAL; } RING_SPACE_TEST_WITH_RETURN(dev_priv); VB_AGE_TEST_WITH_RETURN(dev_priv); buf->used = indirect->end; buf_priv->discard = indirect->discard; #if 0 /* Wait for the 3D stream to idle before the indirect buffer * containing 2D acceleration commands is processed. */ BEGIN_RING(2); RADEON_WAIT_UNTIL_3D_IDLE(); ADVANCE_RING(); #endif /* Dispatch the indirect buffer full of commands from the * X server. This is insecure and is thus only available to * privileged clients. 
*/ r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end); COMMIT_RING(); return 0; } static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_getparam_t *param = data; int value; DEV_INIT_TEST_WITH_RETURN(dev_priv); DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); switch (param->param) { case R128_PARAM_IRQ_NR: value = drm_dev_to_irq(dev); break; default: return -EINVAL; } if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } return 0; } void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_r128_private_t *dev_priv = dev->dev_private; if (dev_priv->page_flipping) r128_do_cleanup_pageflip(dev); } } void r128_driver_lastclose(struct drm_device *dev) { r128_do_cleanup_cce(dev); } struct drm_ioctl_desc r128_ioctls[] = { DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH), DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 
DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH), }; int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
gpl-2.0
regalstreak/S7262-Kernel
arch/mips/rb532/prom.c
8670
3647
/* * RouterBoard 500 specific prom routines * * Copyright (C) 2003, Peter Sadik <peter.sadik@idt.com> * Copyright (C) 2005-2006, P.Christeas <p_christ@hol.gr> * Copyright (C) 2007, Gabor Juhos <juhosg@openwrt.org> * Felix Fietkau <nbd@openwrt.org> * Florian Fainelli <florian@openwrt.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. * */ #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/string.h> #include <linux/console.h> #include <linux/bootmem.h> #include <linux/ioport.h> #include <linux/blkdev.h> #include <asm/bootinfo.h> #include <asm/mach-rc32434/ddr.h> #include <asm/mach-rc32434/prom.h> unsigned int idt_cpu_freq = 132000000; EXPORT_SYMBOL(idt_cpu_freq); static struct resource ddr_reg[] = { { .name = "ddr-reg", .start = DDR0_PHYS_ADDR, .end = DDR0_PHYS_ADDR + sizeof(struct ddr_ram), .flags = IORESOURCE_MEM, } }; void __init prom_free_prom_memory(void) { /* No prom memory to free */ } static inline int match_tag(char *arg, const char *tag) { return strncmp(arg, tag, strlen(tag)) == 0; } static inline unsigned long tag2ul(char *arg, const char *tag) { char *num; num = arg + strlen(tag); return simple_strtoul(num, 0, 10); } void __init prom_setup_cmdline(void) { static char cmd_line[COMMAND_LINE_SIZE] __initdata; char *cp, *board; int prom_argc; char **prom_argv, **prom_envp; 
int i; prom_argc = fw_arg0; prom_argv = (char **) fw_arg1; prom_envp = (char **) fw_arg2; cp = cmd_line; /* Note: it is common that parameters start * at argv[1] and not argv[0], * however, our elf loader starts at [0] */ for (i = 0; i < prom_argc; i++) { if (match_tag(prom_argv[i], FREQ_TAG)) { idt_cpu_freq = tag2ul(prom_argv[i], FREQ_TAG); continue; } #ifdef IGNORE_CMDLINE_MEM /* parses out the "mem=xx" arg */ if (match_tag(prom_argv[i], MEM_TAG)) continue; #endif if (i > 0) *(cp++) = ' '; if (match_tag(prom_argv[i], BOARD_TAG)) { board = prom_argv[i] + strlen(BOARD_TAG); if (match_tag(board, BOARD_RB532A)) mips_machtype = MACH_MIKROTIK_RB532A; else mips_machtype = MACH_MIKROTIK_RB532; } strcpy(cp, prom_argv[i]); cp += strlen(prom_argv[i]); } *(cp++) = ' '; i = strlen(arcs_cmdline); if (i > 0) { *(cp++) = ' '; strcpy(cp, arcs_cmdline); cp += strlen(arcs_cmdline); } cmd_line[COMMAND_LINE_SIZE - 1] = '\0'; strcpy(arcs_cmdline, cmd_line); } void __init prom_init(void) { struct ddr_ram __iomem *ddr; phys_t memsize; phys_t ddrbase; ddr = ioremap_nocache(ddr_reg[0].start, ddr_reg[0].end - ddr_reg[0].start); if (!ddr) { printk(KERN_ERR "Unable to remap DDR register\n"); return; } ddrbase = (phys_t)&ddr->ddrbase; memsize = (phys_t)&ddr->ddrmask; memsize = 0 - memsize; prom_setup_cmdline(); /* give all RAM to boot allocator, * except for the first 0x400 and the last 0x200 bytes */ add_memory_region(ddrbase + 0x400, memsize - 0x600, BOOT_MEM_RAM); }
gpl-2.0
jollaman999/jolla-kernel_G_v30a-Stock
arch/sparc/mm/extable.c
13534
2252
/* * linux/arch/sparc/mm/extable.c */ #include <linux/module.h> #include <asm/uaccess.h> void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish) { } /* Caller knows they are in a range if ret->fixup == 0 */ const struct exception_table_entry * search_extable(const struct exception_table_entry *start, const struct exception_table_entry *last, unsigned long value) { const struct exception_table_entry *walk; /* Single insn entries are encoded as: * word 1: insn address * word 2: fixup code address * * Range entries are encoded as: * word 1: first insn address * word 2: 0 * word 3: last insn address + 4 bytes * word 4: fixup code address * * Deleted entries are encoded as: * word 1: unused * word 2: -1 * * See asm/uaccess.h for more details. */ /* 1. Try to find an exact match. */ for (walk = start; walk <= last; walk++) { if (walk->fixup == 0) { /* A range entry, skip both parts. */ walk++; continue; } /* A deleted entry; see trim_init_extable */ if (walk->fixup == -1) continue; if (walk->insn == value) return walk; } /* 2. Try to find a range match. */ for (walk = start; walk <= (last - 1); walk++) { if (walk->fixup) continue; if (walk[0].insn <= value && walk[1].insn > value) return walk; walk++; } return NULL; } #ifdef CONFIG_MODULES /* We could memmove them around; easier to mark the trimmed ones. */ void trim_init_extable(struct module *m) { unsigned int i; bool range; for (i = 0; i < m->num_exentries; i += range ? 2 : 1) { range = m->extable[i].fixup == 0; if (within_module_init(m->extable[i].insn, m)) { m->extable[i].fixup = -1; if (range) m->extable[i+1].fixup = -1; } if (range) i++; } } #endif /* CONFIG_MODULES */ /* Special extable search, which handles ranges. Returns fixup */ unsigned long search_extables_range(unsigned long addr, unsigned long *g2) { const struct exception_table_entry *entry; entry = search_exception_tables(addr); if (!entry) return 0; /* Inside range? 
Fix g2 and return correct fixup */ if (!entry->fixup) { *g2 = (addr - entry->insn) / 4; return (entry + 1)->fixup; } return entry->fixup; }
gpl-2.0
emxys1/imx6rex-linux-3.10.17
drivers/net/usb/smsc75xx.c
479
57069
/*************************************************************************** * * Copyright (C) 2007-2010 SMSC * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * *****************************************************************************/ #include <linux/module.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/bitrev.h> #include <linux/crc16.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> #include <linux/slab.h> #include "smsc75xx.h" #define SMSC_CHIPNAME "smsc75xx" #define SMSC_DRIVER_VERSION "1.0.0" #define HS_USB_PKT_SIZE (512) #define FS_USB_PKT_SIZE (64) #define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE) #define DEFAULT_FS_BURST_CAP_SIZE (6 * 1024 + 33 * FS_USB_PKT_SIZE) #define DEFAULT_BULK_IN_DELAY (0x00002000) #define MAX_SINGLE_PACKET_SIZE (9000) #define LAN75XX_EEPROM_MAGIC (0x7500) #define EEPROM_MAC_OFFSET (0x01) #define DEFAULT_TX_CSUM_ENABLE (true) #define DEFAULT_RX_CSUM_ENABLE (true) #define SMSC75XX_INTERNAL_PHY_ID (1) #define SMSC75XX_TX_OVERHEAD (8) #define MAX_RX_FIFO_SIZE (20 * 1024) #define MAX_TX_FIFO_SIZE (12 * 1024) #define USB_VENDOR_ID_SMSC (0x0424) #define USB_PRODUCT_ID_LAN7500 (0x7500) #define USB_PRODUCT_ID_LAN7505 
(0x7505) #define RXW_PADDING 2 #define SUPPORTED_WAKE (WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \ WAKE_MCAST | WAKE_ARP | WAKE_MAGIC) #define SUSPEND_SUSPEND0 (0x01) #define SUSPEND_SUSPEND1 (0x02) #define SUSPEND_SUSPEND2 (0x04) #define SUSPEND_SUSPEND3 (0x08) #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) struct smsc75xx_priv { struct usbnet *dev; u32 rfe_ctl; u32 wolopts; u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN]; struct mutex dataport_mutex; spinlock_t rfe_ctl_lock; struct work_struct set_multicast; u8 suspend_flags; }; struct usb_context { struct usb_ctrlrequest req; struct usbnet *dev; }; static bool turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data, int in_pm) { u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16); BUG_ON(!dev); if (!in_pm) fn = usbnet_read_cmd; else fn = usbnet_read_cmd_nopm; ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", index, ret); le32_to_cpus(&buf); *data = buf; return ret; } static int __must_check __smsc75xx_write_reg(struct usbnet *dev, u32 index, u32 data, int in_pm) { u32 buf; int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); BUG_ON(!dev); if (!in_pm) fn = usbnet_write_cmd; else fn = usbnet_write_cmd_nopm; buf = data; cpu_to_le32s(&buf); ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); if (unlikely(ret < 0)) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); return ret; } static int __must_check smsc75xx_read_reg_nopm(struct usbnet *dev, u32 index, u32 *data) { return __smsc75xx_read_reg(dev, index, data, 1); } static 
int __must_check smsc75xx_write_reg_nopm(struct usbnet *dev, u32 index, u32 data) { return __smsc75xx_write_reg(dev, index, data, 1); } static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, u32 *data) { return __smsc75xx_read_reg(dev, index, data, 0); } static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, u32 data) { return __smsc75xx_write_reg(dev, index, data, 0); } /* Loop until the read is completed with timeout * called with phy_mutex held */ static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev, int in_pm) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } if (!(val & MII_ACCESS_BUSY)) return 0; } while (!time_after(jiffies, start_time + HZ)); return -EIO; } static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx, int in_pm) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_read\n"); goto done; } /* set the address, index & direction (read from PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_READ | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_ACCESS\n"); goto done; } ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx); goto done; } ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } ret = (u16)(val & 0xFFFF); done: mutex_unlock(&dev->phy_mutex); 
return ret; } static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, int regval, int in_pm) { struct usbnet *dev = netdev_priv(netdev); u32 val, addr; int ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_write\n"); goto done; } val = regval; ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } /* set the address, index & direction (write to PHY) */ phy_id &= dev->mii.phy_id_mask; idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) | MII_ACCESS_WRITE | MII_ACCESS_BUSY; ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm); if (ret < 0) { netdev_warn(dev->net, "Error writing MII_ACCESS\n"); goto done; } ret = __smsc75xx_phy_wait_not_busy(dev, in_pm); if (ret < 0) { netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx); goto done; } done: mutex_unlock(&dev->phy_mutex); } static int smsc75xx_mdio_read_nopm(struct net_device *netdev, int phy_id, int idx) { return __smsc75xx_mdio_read(netdev, phy_id, idx, 1); } static void smsc75xx_mdio_write_nopm(struct net_device *netdev, int phy_id, int idx, int regval) { __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 1); } static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) { return __smsc75xx_mdio_read(netdev, phy_id, idx, 0); } static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, int regval) { __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 0); } static int smsc75xx_wait_eeprom(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_CMD\n"); return ret; } if (!(val & E2P_CMD_BUSY) || (val & 
E2P_CMD_TIMEOUT)) break; udelay(40); } while (!time_after(jiffies, start_time + HZ)); if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) { netdev_warn(dev->net, "EEPROM read operation timeout\n"); return -EIO; } return 0; } static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = smsc75xx_read_reg(dev, E2P_CMD, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_CMD\n"); return ret; } if (!(val & E2P_CMD_BUSY)) return 0; udelay(40); } while (!time_after(jiffies, start_time + HZ)); netdev_warn(dev->net, "EEPROM is busy\n"); return -EIO; } static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; for (i = 0; i < length; i++) { val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; ret = smsc75xx_read_reg(dev, E2P_DATA, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading E2P_DATA\n"); return ret; } data[i] = val & 0xFF; offset++; } return 0; } static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length, u8 *data) { u32 val; int i, ret; BUG_ON(!dev); BUG_ON(!data); ret = smsc75xx_eeprom_confirm_not_busy(dev); if (ret) return ret; /* Issue write/erase enable command */ val = E2P_CMD_BUSY | E2P_CMD_EWEN; ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; for (i = 0; i < length; i++) { /* Fill data register */ val = data[i]; ret = smsc75xx_write_reg(dev, E2P_DATA, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_DATA\n"); return ret; } /* Send "write" command */ val = E2P_CMD_BUSY | E2P_CMD_WRITE | 
(offset & E2P_CMD_ADDR); ret = smsc75xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { netdev_warn(dev->net, "Error writing E2P_CMD\n"); return ret; } ret = smsc75xx_wait_eeprom(dev); if (ret < 0) return ret; offset++; } return 0; } static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev) { int i, ret; for (i = 0; i < 100; i++) { u32 dp_sel; ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error reading DP_SEL\n"); return ret; } if (dp_sel & DP_SEL_DPRDY) return 0; udelay(40); } netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out\n"); return -EIO; } static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr, u32 length, u32 *buf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 dp_sel; int i, ret; mutex_lock(&pdata->dataport_mutex); ret = smsc75xx_dataport_wait_not_busy(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_dataport_write busy on entry\n"); goto done; } ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error reading DP_SEL\n"); goto done; } dp_sel &= ~DP_SEL_RSEL; dp_sel |= ram_select; ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_SEL\n"); goto done; } for (i = 0; i < length; i++) { ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_ADDR\n"); goto done; } ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_DATA\n"); goto done; } ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); if (ret < 0) { netdev_warn(dev->net, "Error writing DP_CMD\n"); goto done; } ret = smsc75xx_dataport_wait_not_busy(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_dataport_write timeout\n"); goto done; } } done: mutex_unlock(&pdata->dataport_mutex); return ret; } /* returns hash bit number for given MAC address */ static u32 smsc75xx_hash(char addr[ETH_ALEN]) { return 
(ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; } static void smsc75xx_deferred_multicast_write(struct work_struct *param) { struct smsc75xx_priv *pdata = container_of(param, struct smsc75xx_priv, set_multicast); struct usbnet *dev = pdata->dev; int ret; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", pdata->rfe_ctl); smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN, DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) netdev_warn(dev->net, "Error writing RFE_CRL\n"); } static void smsc75xx_set_multicast(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int i; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); pdata->rfe_ctl &= ~(RFE_CTL_AU | RFE_CTL_AM | RFE_CTL_DPF | RFE_CTL_MHF); pdata->rfe_ctl |= RFE_CTL_AB; for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) pdata->multicast_hash_table[i] = 0; if (dev->net->flags & IFF_PROMISC) { netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU; } else if (dev->net->flags & IFF_ALLMULTI) { netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; } else if (!netdev_mc_empty(dev->net)) { struct netdev_hw_addr *ha; netif_dbg(dev, drv, dev->net, "receive multicast hash filter\n"); pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; netdev_for_each_mc_addr(ha, netdev) { u32 bitnum = smsc75xx_hash(ha->addr); pdata->multicast_hash_table[bitnum / 32] |= (1 << (bitnum % 32)); } } else { netif_dbg(dev, drv, dev->net, "receive own packets only\n"); pdata->rfe_ctl |= RFE_CTL_DPF; } spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* defer register writes to a sleepable context */ schedule_work(&pdata->set_multicast); } static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex, u16 lcladv, u16 rmtadv) { u32 flow = 0, fct_flow = 0; int 
ret; if (duplex == DUPLEX_FULL) { u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); if (cap & FLOW_CTRL_TX) { flow = (FLOW_TX_FCEN | 0xFFFF); /* set fct_flow thresholds to 20% and 80% */ fct_flow = (8 << 8) | 32; } if (cap & FLOW_CTRL_RX) flow |= FLOW_RX_FCEN; netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n", (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); } else { netif_dbg(dev, link, dev->net, "half duplex\n"); } ret = smsc75xx_write_reg(dev, FLOW, flow); if (ret < 0) { netdev_warn(dev->net, "Error writing FLOW\n"); return ret; } ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); if (ret < 0) { netdev_warn(dev->net, "Error writing FCT_FLOW\n"); return ret; } return 0; } static int smsc75xx_link_reset(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; u16 lcladv, rmtadv; int ret; /* write to clear phy interrupt status */ smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, PHY_INT_SRC_CLEAR_ALL); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); if (ret < 0) { netdev_warn(dev->net, "Error writing INT_STS\n"); return ret; } mii_check_media(mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n", ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv); return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); } static void smsc75xx_status(struct usbnet *dev, struct urb *urb) { u32 intdata; if (urb->actual_length != 4) { netdev_warn(dev->net, "unexpected urb length %d\n", urb->actual_length); return; } memcpy(&intdata, urb->transfer_buffer, 4); le32_to_cpus(&intdata); netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); if (intdata & INT_ENP_PHY_INT) usbnet_defer_kevent(dev, EVENT_LINK_RESET); else netdev_warn(dev->net, 
"unexpected interrupt, intdata=0x%08X\n", intdata); } static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) { return MAX_EEPROM_SIZE; } static int smsc75xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); ee->magic = LAN75XX_EEPROM_MAGIC; return smsc75xx_read_eeprom(dev, ee->offset, ee->len, data); } static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct usbnet *dev = netdev_priv(netdev); if (ee->magic != LAN75XX_EEPROM_MAGIC) { netdev_warn(dev->net, "EEPROM: magic value mismatch: 0x%x\n", ee->magic); return -EINVAL; } return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data); } static void smsc75xx_ethtool_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); wolinfo->supported = SUPPORTED_WAKE; wolinfo->wolopts = pdata->wolopts; } static int smsc75xx_ethtool_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { struct usbnet *dev = netdev_priv(net); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); int ret; pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); if (ret < 0) netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret); return ret; } static const struct ethtool_ops smsc75xx_ethtool_ops = { .get_link = usbnet_get_link, .nway_reset = usbnet_nway_reset, .get_drvinfo = usbnet_get_drvinfo, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_settings = usbnet_get_settings, .set_settings = usbnet_set_settings, .get_eeprom_len = smsc75xx_ethtool_get_eeprom_len, .get_eeprom = smsc75xx_ethtool_get_eeprom, .set_eeprom = smsc75xx_ethtool_set_eeprom, .get_wol = smsc75xx_ethtool_get_wol, .set_wol = smsc75xx_ethtool_set_wol, }; static int smsc75xx_ioctl(struct net_device 
*netdev, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(netdev); if (!netif_running(netdev)) return -EINVAL; return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static void smsc75xx_init_mac_address(struct usbnet *dev) { /* try reading mac address from EEPROM */ if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, dev->net->dev_addr) == 0) { if (is_valid_ether_addr(dev->net->dev_addr)) { /* eeprom values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n"); return; } } /* no eeprom, or eeprom values are invalid. generate random MAC */ eth_hw_addr_random(dev->net); netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); } static int smsc75xx_set_mac_address(struct usbnet *dev) { u32 addr_lo = dev->net->dev_addr[0] | dev->net->dev_addr[1] << 8 | dev->net->dev_addr[2] << 16 | dev->net->dev_addr[3] << 24; u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); if (ret < 0) { netdev_warn(dev->net, "Failed to write RX_ADDRH: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); if (ret < 0) { netdev_warn(dev->net, "Failed to write RX_ADDRL: %d\n", ret); return ret; } addr_hi |= ADDR_FILTX_FB_VALID; ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); if (ret < 0) { netdev_warn(dev->net, "Failed to write ADDR_FILTX: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); if (ret < 0) netdev_warn(dev->net, "Failed to write ADDR_FILTX+4: %d\n", ret); return ret; } static int smsc75xx_phy_initialize(struct usbnet *dev) { int bmcr, ret, timeout = 0; /* Initialize MII structure */ dev->mii.dev = dev->net; dev->mii.mdio_read = smsc75xx_mdio_read; dev->mii.mdio_write = smsc75xx_mdio_write; dev->mii.phy_id_mask = 0x1f; dev->mii.reg_num_mask = 0x1f; dev->mii.supports_gmii = 1; dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID; /* reset phy and wait for reset to complete */ 
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); do { msleep(10); bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); if (bmcr < 0) { netdev_warn(dev->net, "Error reading MII_BMCR\n"); return bmcr; } timeout++; } while ((bmcr & BMCR_RESET) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on PHY Reset\n"); return -EIO; } smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000, ADVERTISE_1000FULL); /* read and write to clear phy interrupt status */ ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); return ret; } smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, PHY_INT_MASK_DEFAULT); mii_nway_restart(&dev->mii); netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n"); return 0; } static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) { int ret = 0; u32 buf; bool rxenabled; ret = smsc75xx_read_reg(dev, MAC_RX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); return ret; } rxenabled = ((buf & MAC_RX_RXEN) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } if (rxenabled) { buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } } return 0; } static int smsc75xx_change_mtu(struct 
net_device *netdev, int new_mtu) { struct usbnet *dev = netdev_priv(netdev); int ret; if (new_mtu > MAX_SINGLE_PACKET_SIZE) return -EINVAL; ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac rx frame length\n"); return ret; } return usbnet_change_mtu(netdev, new_mtu); } /* Enable or disable Rx checksum offload engine */ static int smsc75xx_set_features(struct net_device *netdev, netdev_features_t features) { struct usbnet *dev = netdev_priv(netdev); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); unsigned long flags; int ret; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); if (features & NETIF_F_RXCSUM) pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM; else pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM); spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* it's racing here! */ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) netdev_warn(dev->net, "Error writing RFE_CTL\n"); return ret; } static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) { int timeout = 0; do { u32 buf; int ret; ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } if (buf & PMT_CTL_DEV_RDY) return 0; msleep(10); timeout++; } while (timeout < 100); netdev_warn(dev->net, "timeout waiting for device ready\n"); return -EIO; } static int smsc75xx_reset(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 buf; int ret = 0, timeout; netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset\n"); ret = smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_reset\n"); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } buf |= HW_CFG_LRST; ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { 
netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } timeout++; } while ((buf & HW_CFG_LRST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on completion of Lite Reset\n"); return -EIO; } netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY\n"); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } buf |= PMT_CTL_PHY_RST; ret = smsc75xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret); return ret; } timeout = 0; do { msleep(10); ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret); return ret; } timeout++; } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout waiting for PHY Reset\n"); return -EIO; } netif_dbg(dev, ifup, dev->net, "PHY reset complete\n"); ret = smsc75xx_set_mac_address(dev); if (ret < 0) { netdev_warn(dev->net, "Failed to set mac address\n"); return ret; } netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n", dev->net->dev_addr); ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n", buf); buf |= HW_CFG_BIR; ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR: 0x%08x\n", buf); if (!turbo_mode) { buf = 0; dev->rx_urb_size = MAX_SINGLE_PACKET_SIZE; } 
else if (dev->udev->speed == USB_SPEED_HIGH) { buf = DEFAULT_HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_HS_BURST_CAP_SIZE; } else { buf = DEFAULT_FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; } netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size); ret = smsc75xx_write_reg(dev, BURST_CAP, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from BURST_CAP after writing: 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); if (ret < 0) { netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "Read Value from BULK_IN_DLY after writing: 0x%08x\n", buf); if (turbo_mode) { ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); buf |= (HW_CFG_MEF | HW_CFG_BCE); ret = smsc75xx_write_reg(dev, HW_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf); } /* set FIFO sizes */ buf = (MAX_RX_FIFO_SIZE - 512) / 512; ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_RX_FIFO_END: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x\n", buf); buf = (MAX_TX_FIFO_SIZE - 512) / 512; ret = 
smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_TX_FIFO_END: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x\n", buf); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); if (ret < 0) { netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, ID_REV, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, E2P_CMD, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read E2P_CMD: %d\n", ret); return ret; } /* only set default GPIO/LED settings if no EEPROM is detected */ if (!(buf & E2P_CMD_LOADED)) { ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read LED_GPIO_CFG: %d\n", ret); return ret; } buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret); return ret; } } ret = smsc75xx_write_reg(dev, FLOW, 0); if (ret < 0) { netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret); return ret; } ret = smsc75xx_write_reg(dev, FCT_FLOW, 0); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_FLOW: %d\n", ret); return ret; } /* Don't need rfe_ctl_lock during initialisation */ ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); return ret; } pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to write RFE_CTL: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); if (ret < 0) { netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret); return ret; } 
netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x\n", pdata->rfe_ctl); /* Enable or disable checksum offload engines */ smsc75xx_set_features(dev->net, dev->net->features); smsc75xx_set_multicast(dev->net); ret = smsc75xx_phy_initialize(dev); if (ret < 0) { netdev_warn(dev->net, "Failed to initialize PHY: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret); return ret; } /* enable PHY interrupts */ buf |= INT_ENP_PHY_INT; ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret); return ret; } /* allow mac to detect speed and duplex from phy */ ret = smsc75xx_read_reg(dev, MAC_CR, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret); return ret; } buf |= (MAC_CR_ADD | MAC_CR_ASD); ret = smsc75xx_write_reg(dev, MAC_CR, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret); return ret; } ret = smsc75xx_read_reg(dev, MAC_TX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_TX: %d\n", ret); return ret; } buf |= MAC_TX_TXEN; ret = smsc75xx_write_reg(dev, MAC_TX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_TX: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read FCT_TX_CTL: %d\n", ret); return ret; } buf |= FCT_TX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_TX_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); if (ret < 0) { netdev_warn(dev->net, "Failed to set max rx frame length\n"); return ret; } ret = smsc75xx_read_reg(dev, MAC_RX, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read 
MAC_RX: %d\n", ret); return ret; } buf |= MAC_RX_RXEN; ret = smsc75xx_write_reg(dev, MAC_RX, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x\n", buf); ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); if (ret < 0) { netdev_warn(dev->net, "Failed to read FCT_RX_CTL: %d\n", ret); return ret; } buf |= FCT_RX_CTL_EN; ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); if (ret < 0) { netdev_warn(dev->net, "Failed to write FCT_RX_CTL: %d\n", ret); return ret; } netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x\n", buf); netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0\n"); return 0; } static const struct net_device_ops smsc75xx_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = smsc75xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = smsc75xx_ioctl, .ndo_set_rx_mode = smsc75xx_set_multicast, .ndo_set_features = smsc75xx_set_features, }; static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = NULL; int ret; printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); ret = usbnet_get_endpoints(dev, intf); if (ret < 0) { netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret); return ret; } dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), GFP_KERNEL); pdata = (struct smsc75xx_priv *)(dev->data[0]); if (!pdata) return -ENOMEM; pdata->dev = dev; spin_lock_init(&pdata->rfe_ctl_lock); mutex_init(&pdata->dataport_mutex); INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); if (DEFAULT_TX_CSUM_ENABLE) dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; if (DEFAULT_RX_CSUM_ENABLE) dev->net->features |= NETIF_F_RXCSUM; dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ret = 
smsc75xx_wait_ready(dev, 0); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_bind\n"); return ret; } smsc75xx_init_mac_address(dev); /* Init all registers */ ret = smsc75xx_reset(dev); if (ret < 0) { netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); return ret; } dev->net->netdev_ops = &smsc75xx_netdev_ops; dev->net->ethtool_ops = &smsc75xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; } static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); if (pdata) { netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; dev->data[0] = 0; } } static u16 smsc_crc(const u8 *buffer, size_t len) { return bitrev16(crc16(0xFFFF, buffer, len)); } static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg, u32 wuf_mask1) { int cfg_base = WUF_CFGX + filter * 4; int mask_base = WUF_MASKX + filter * 16; int ret; ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_CFGX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 4, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 8, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } ret = smsc75xx_write_reg(dev, mask_base + 12, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_MASKX\n"); return ret; } return 0; } static int smsc75xx_enter_suspend0(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading 
PMT_CTL\n"); return ret; } val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST)); val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND0; return 0; } static int smsc75xx_enter_suspend1(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_1; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND1; return 0; } static int smsc75xx_enter_suspend2(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_2; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND2; return 0; } static int smsc75xx_enter_suspend3(struct usbnet *dev) { struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val; int ret; ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading FCT_RX_CTL\n"); return ret; } if (val & FCT_RX_CTL_RXUSED) { netdev_dbg(dev->net, "rx fifo not empty 
in autosuspend\n"); return -EBUSY; } ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } /* clear wol status */ val &= ~PMT_CTL_WUPS; val |= PMT_CTL_WUPS_WOL; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } pdata->suspend_flags |= SUSPEND_SUSPEND3; return 0; } static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask) { struct mii_if_info *mii = &dev->mii; int ret; netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n"); /* read to clear */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_SRC\n"); return ret; } /* enable interrupt source */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_INT_MASK\n"); return ret; } ret |= mask; smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret); return 0; } static int smsc75xx_link_ok_nopm(struct usbnet *dev) { struct mii_if_info *mii = &dev->mii; int ret; /* first, a dummy read, needed to latch some MII phys */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_BMSR\n"); return ret; } ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR); if (ret < 0) { netdev_warn(dev->net, "Error reading MII_BMSR\n"); return ret; } return !!(ret & BMSR_LSTATUS); } static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up) { int ret; if (!netif_running(dev->net)) { /* interface is ifconfig down so fully power down hw */ netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n"); return 
smsc75xx_enter_suspend2(dev); } if (!link_up) { /* link is down so enter EDPD mode */ netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n"); /* enable PHY wakeup events for if cable is attached */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_ANEG_COMP); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); return ret; } netdev_info(dev->net, "entering SUSPEND1 mode\n"); return smsc75xx_enter_suspend1(dev); } /* enable PHY wakeup events so we remote wakeup if cable is pulled */ ret = smsc75xx_enable_phy_wakeup_interrupts(dev, PHY_INT_MASK_LINK_DOWN); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); return ret; } netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n"); return smsc75xx_enter_suspend3(dev); } static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u32 val, link_up; int ret; ret = usbnet_suspend(intf, message); if (ret < 0) { netdev_warn(dev->net, "usbnet_suspend error\n"); return ret; } if (pdata->suspend_flags) { netdev_warn(dev->net, "error during last resume\n"); pdata->suspend_flags = 0; } /* determine if link is up using only _nopm functions */ link_up = smsc75xx_link_ok_nopm(dev); if (message.event == PM_EVENT_AUTO_SUSPEND) { ret = smsc75xx_autosuspend(dev, link_up); goto done; } /* if we get this far we're not autosuspending */ /* if no wol options set, or if link is down and we're not waking on * PHY activity, enter lowest power SUSPEND2 mode */ if (!(pdata->wolopts & SUPPORTED_WAKE) || !(link_up || (pdata->wolopts & WAKE_PHY))) { netdev_info(dev->net, "entering SUSPEND2 mode\n"); /* disable energy detect (link up) & wake up events */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~(WUCSR_MPEN | WUCSR_WUEN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret 
< 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); goto done; } val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); goto done; } ret = smsc75xx_enter_suspend2(dev); goto done; } if (pdata->wolopts & WAKE_PHY) { ret = smsc75xx_enable_phy_wakeup_interrupts(dev, (PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN)); if (ret < 0) { netdev_warn(dev->net, "error enabling PHY wakeup ints\n"); goto done; } /* if link is down then configure EDPD and enter SUSPEND1, * otherwise enter SUSPEND0 below */ if (!link_up) { struct mii_if_info *mii = &dev->mii; netdev_info(dev->net, "entering SUSPEND1 mode\n"); /* enable energy detect power-down mode */ ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS); if (ret < 0) { netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n"); goto done; } ret |= MODE_CTRL_STS_EDPWRDOWN; smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret); /* enter SUSPEND1 mode */ ret = smsc75xx_enter_suspend1(dev); goto done; } } if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) { int i, filter = 0; /* disable all filters */ for (i = 0; i < WUF_NUM; i++) { ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0); if (ret < 0) { netdev_warn(dev->net, "Error writing WUF_CFGX\n"); goto done; } } if (pdata->wolopts & WAKE_MCAST) { const u8 mcast[] = {0x01, 0x00, 0x5E}; netdev_info(dev->net, "enabling multicast detection\n"); val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST | smsc_crc(mcast, 3); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007); if (ret < 0) { netdev_warn(dev->net, "Error writing wakeup filter\n"); goto done; } } if (pdata->wolopts & WAKE_ARP) { const u8 arp[] = {0x08, 0x06}; netdev_info(dev->net, "enabling ARP detection\n"); val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16) | 
smsc_crc(arp, 2); ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003); if (ret < 0) { netdev_warn(dev->net, "Error writing wakeup filter\n"); goto done; } } /* clear any pending pattern match packet status */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUFR; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } netdev_info(dev->net, "enabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } else { netdev_info(dev->net, "disabling packet match detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~WUCSR_WUEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } /* disable magic, bcast & unicast wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } if (pdata->wolopts & WAKE_PHY) { netdev_info(dev->net, "enabling PHY wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); goto done; } /* clear wol status, enable energy detection */ val &= ~PMT_CTL_WUPS; val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN); ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); goto done; } } if (pdata->wolopts & WAKE_MAGIC) { 
netdev_info(dev->net, "enabling magic packet wakeup\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } /* clear any pending magic packet status */ val |= WUCSR_MPR | WUCSR_MPEN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } if (pdata->wolopts & WAKE_BCAST) { netdev_info(dev->net, "enabling broadcast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_BCAST_FR | WUCSR_BCST_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } if (pdata->wolopts & WAKE_UCAST) { netdev_info(dev->net, "enabling unicast detection\n"); ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); goto done; } val |= WUCSR_WUFR | WUCSR_PFDA_EN; ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); goto done; } } /* enable receiver to enable frame reception */ ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val); if (ret < 0) { netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret); goto done; } val |= MAC_RX_RXEN; ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val); if (ret < 0) { netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret); goto done; } /* some wol options are enabled, so enter SUSPEND0 */ netdev_info(dev->net, "entering SUSPEND0 mode\n"); ret = smsc75xx_enter_suspend0(dev); done: /* * TODO: resume() might need to handle the suspend failure * in system sleep */ if (ret && PMSG_IS_AUTO(message)) usbnet_resume(intf); return ret; } static int smsc75xx_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); u8 suspend_flags = pdata->suspend_flags; int ret; 
u32 val; netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags); /* do this first to ensure it's cleared even in error case */ pdata->suspend_flags = 0; if (suspend_flags & SUSPEND_ALLMODES) { /* Disable wakeup sources */ ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading WUCSR\n"); return ret; } val &= ~(WUCSR_WUEN | WUCSR_MPEN | WUCSR_PFDA_EN | WUCSR_BCST_EN); ret = smsc75xx_write_reg_nopm(dev, WUCSR, val); if (ret < 0) { netdev_warn(dev->net, "Error writing WUCSR\n"); return ret; } /* clear wake-up status */ ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val &= ~PMT_CTL_WOL_EN; val |= PMT_CTL_WUPS; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } } if (suspend_flags & SUSPEND_SUSPEND2) { netdev_info(dev->net, "resuming from SUSPEND2\n"); ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val); if (ret < 0) { netdev_warn(dev->net, "Error reading PMT_CTL\n"); return ret; } val |= PMT_CTL_PHY_PWRUP; ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val); if (ret < 0) { netdev_warn(dev->net, "Error writing PMT_CTL\n"); return ret; } } ret = smsc75xx_wait_ready(dev, 1); if (ret < 0) { netdev_warn(dev->net, "device not ready in smsc75xx_resume\n"); return ret; } return usbnet_resume(intf); } static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { if (!(dev->net->features & NETIF_F_RXCSUM) || unlikely(rx_cmd_a & RX_CMD_A_LCSM)) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT)); skb->ip_summed = CHECKSUM_COMPLETE; } } static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, align_count, size; struct sk_buff *ax_skb; unsigned char *packet; memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a)); 
le32_to_cpus(&rx_cmd_a); skb_pull(skb, 4); memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b)); le32_to_cpus(&rx_cmd_b); skb_pull(skb, 4 + RXW_PADDING); packet = skb->data; /* get the packet length */ size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); dev->net->stats.rx_errors++; dev->net->stats.rx_dropped++; if (rx_cmd_a & RX_CMD_A_FCS) dev->net->stats.rx_crc_errors++; else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) dev->net->stats.rx_frame_errors++; } else { /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); return 0; } /* last frame in this batch */ if (skb->len == size) { smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a, rx_cmd_b); skb_trim(skb, skb->len - 4); /* remove fcs */ skb->truesize = size + sizeof(struct sk_buff); return 1; } ax_skb = skb_clone(skb, GFP_ATOMIC); if (unlikely(!ax_skb)) { netdev_warn(dev->net, "Error allocating skb\n"); return 0; } ax_skb->len = size; ax_skb->data = packet; skb_set_tail_pointer(ax_skb, size); smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a, rx_cmd_b); skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ ax_skb->truesize = size + sizeof(struct sk_buff); usbnet_skb_return(dev, ax_skb); } skb_pull(skb, size); /* padding bytes before the next frame starts */ if (skb->len) skb_pull(skb, align_count); } if (unlikely(skb->len < 0)) { netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len); return 0; } return 1; } static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { u32 tx_cmd_a, tx_cmd_b; if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { struct sk_buff *skb2 = skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return NULL; } tx_cmd_a = 
(u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS; if (skb->ip_summed == CHECKSUM_PARTIAL) tx_cmd_a |= TX_CMD_A_IPE | TX_CMD_A_TPE; if (skb_is_gso(skb)) { u16 mss = max(skb_shinfo(skb)->gso_size, TX_MSS_MIN); tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT) & TX_CMD_B_MSS; tx_cmd_a |= TX_CMD_A_LSO; } else { tx_cmd_b = 0; } skb_push(skb, 4); cpu_to_le32s(&tx_cmd_b); memcpy(skb->data, &tx_cmd_b, 4); skb_push(skb, 4); cpu_to_le32s(&tx_cmd_a); memcpy(skb->data, &tx_cmd_a, 4); return skb; } static int smsc75xx_manage_power(struct usbnet *dev, int on) { dev->intf->needs_remote_wakeup = on; return 0; } static const struct driver_info smsc75xx_info = { .description = "smsc75xx USB 2.0 Gigabit Ethernet", .bind = smsc75xx_bind, .unbind = smsc75xx_unbind, .link_reset = smsc75xx_link_reset, .reset = smsc75xx_reset, .rx_fixup = smsc75xx_rx_fixup, .tx_fixup = smsc75xx_tx_fixup, .status = smsc75xx_status, .manage_power = smsc75xx_manage_power, .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, }; static const struct usb_device_id products[] = { { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7500), .driver_info = (unsigned long) &smsc75xx_info, }, { /* SMSC7500 USB Gigabit Ethernet Device */ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_PRODUCT_ID_LAN7505), .driver_info = (unsigned long) &smsc75xx_info, }, { }, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver smsc75xx_driver = { .name = SMSC_CHIPNAME, .id_table = products, .probe = usbnet_probe, .suspend = smsc75xx_suspend, .resume = smsc75xx_resume, .reset_resume = smsc75xx_resume, .disconnect = usbnet_disconnect, .disable_hub_initiated_lpm = 1, .supports_autosuspend = 1, }; module_usb_driver(smsc75xx_driver); MODULE_AUTHOR("Nancy Lin"); MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>"); MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices"); MODULE_LICENSE("GPL");
gpl-2.0
drhonk/Bali_t959
arch/powerpc/lib/sstep.c
1503
5062
/* * Single-step support. * * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/ptrace.h> #include <asm/sstep.h> #include <asm/processor.h> extern char system_call_common[]; #ifdef CONFIG_PPC64 /* Bits in SRR1 that are copied from MSR */ #define MSR_MASK 0xffffffff87c0ffffUL #else #define MSR_MASK 0x87c0ffff #endif /* * Determine whether a conditional branch instruction would branch. */ static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs) { unsigned int bo = (instr >> 21) & 0x1f; unsigned int bi; if ((bo & 4) == 0) { /* decrement counter */ --regs->ctr; if (((bo >> 1) & 1) ^ (regs->ctr == 0)) return 0; } if ((bo & 0x10) == 0) { /* check bit from CR */ bi = (instr >> 16) & 0x1f; if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) return 0; } return 1; } /* * Emulate instructions that cause a transfer of control. * Returns 1 if the step was emulated, 0 if not, * or -1 if the instruction is one that should not be stepped, * such as an rfid, or a mtmsrd that would clear MSR_RI. */ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) { unsigned int opcode, rs, rb, rd, spr; unsigned long int imm; opcode = instr >> 26; switch (opcode) { case 16: /* bc */ imm = (signed short)(instr & 0xfffc); if ((instr & 2) == 0) imm += regs->nip; regs->nip += 4; if ((regs->msr & MSR_SF) == 0) regs->nip &= 0xffffffffUL; if (instr & 1) regs->link = regs->nip; if (branch_taken(instr, regs)) regs->nip = imm; return 1; #ifdef CONFIG_PPC64 case 17: /* sc */ /* * N.B. this uses knowledge about how the syscall * entry code works. If that is changed, this will * need to be changed also. 
*/ regs->gpr[9] = regs->gpr[13]; regs->gpr[11] = regs->nip + 4; regs->gpr[12] = regs->msr & MSR_MASK; regs->gpr[13] = (unsigned long) get_paca(); regs->nip = (unsigned long) &system_call_common; regs->msr = MSR_KERNEL; return 1; #endif case 18: /* b */ imm = instr & 0x03fffffc; if (imm & 0x02000000) imm -= 0x04000000; if ((instr & 2) == 0) imm += regs->nip; if (instr & 1) { regs->link = regs->nip + 4; if ((regs->msr & MSR_SF) == 0) regs->link &= 0xffffffffUL; } if ((regs->msr & MSR_SF) == 0) imm &= 0xffffffffUL; regs->nip = imm; return 1; case 19: switch (instr & 0x7fe) { case 0x20: /* bclr */ case 0x420: /* bcctr */ imm = (instr & 0x400)? regs->ctr: regs->link; regs->nip += 4; if ((regs->msr & MSR_SF) == 0) { regs->nip &= 0xffffffffUL; imm &= 0xffffffffUL; } if (instr & 1) regs->link = regs->nip; if (branch_taken(instr, regs)) regs->nip = imm; return 1; case 0x24: /* rfid, scary */ return -1; } case 31: rd = (instr >> 21) & 0x1f; switch (instr & 0x7fe) { case 0xa6: /* mfmsr */ regs->gpr[rd] = regs->msr & MSR_MASK; regs->nip += 4; if ((regs->msr & MSR_SF) == 0) regs->nip &= 0xffffffffUL; return 1; case 0x124: /* mtmsr */ imm = regs->gpr[rd]; if ((imm & MSR_RI) == 0) /* can't step mtmsr that would clear MSR_RI */ return -1; regs->msr = imm; regs->nip += 4; return 1; #ifdef CONFIG_PPC64 case 0x164: /* mtmsrd */ /* only MSR_EE and MSR_RI get changed if bit 15 set */ /* mtmsrd doesn't change MSR_HV and MSR_ME */ imm = (instr & 0x10000)? 
0x8002: 0xefffffffffffefffUL; imm = (regs->msr & MSR_MASK & ~imm) | (regs->gpr[rd] & imm); if ((imm & MSR_RI) == 0) /* can't step mtmsrd that would clear MSR_RI */ return -1; regs->msr = imm; regs->nip += 4; if ((imm & MSR_SF) == 0) regs->nip &= 0xffffffffUL; return 1; #endif case 0x26: /* mfcr */ regs->gpr[rd] = regs->ccr; regs->gpr[rd] &= 0xffffffffUL; goto mtspr_out; case 0x2a6: /* mfspr */ spr = (instr >> 11) & 0x3ff; switch (spr) { case 0x20: /* mfxer */ regs->gpr[rd] = regs->xer; regs->gpr[rd] &= 0xffffffffUL; goto mtspr_out; case 0x100: /* mflr */ regs->gpr[rd] = regs->link; goto mtspr_out; case 0x120: /* mfctr */ regs->gpr[rd] = regs->ctr; goto mtspr_out; } break; case 0x378: /* orx */ if (instr & 1) break; rs = (instr >> 21) & 0x1f; rb = (instr >> 11) & 0x1f; if (rs == rb) { /* mr */ rd = (instr >> 16) & 0x1f; regs->gpr[rd] = regs->gpr[rs]; goto mtspr_out; } break; case 0x3a6: /* mtspr */ spr = (instr >> 11) & 0x3ff; switch (spr) { case 0x20: /* mtxer */ regs->xer = (regs->gpr[rd] & 0xffffffffUL); goto mtspr_out; case 0x100: /* mtlr */ regs->link = regs->gpr[rd]; goto mtspr_out; case 0x120: /* mtctr */ regs->ctr = regs->gpr[rd]; mtspr_out: regs->nip += 4; return 1; } } } return 0; }
gpl-2.0
leopesto/kernel_mtk6577
drivers/infiniband/hw/mlx4/main.c
2527
34026
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	"mlx4_ib"
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

/*
 * Deferred-work descriptor used to push a new Ethernet-port GID table
 * to the firmware from the driver workqueue (see update_gids_task()).
 */
struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];	/* snapshot of the per-port GID table */
	struct mlx4_ib_dev     *dev;
	int			port;
};

/* Single-threaded workqueue shared by all mlx4_ib instances. */
static struct workqueue_struct *wq;

/* Fill in the common header fields of a subnet-management GET MAD. */
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}

/* All-zero GID, used as the "empty slot" marker in the iboe gid_table. */
static union ib_gid zgid;

/*
 * ib_device.query_device: fill *props from firmware capabilities plus a
 * NodeInfo MAD query.  Returns 0 on success or a negative errno.
 */
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	/* Translate mlx4 capability bits into IB verbs capability flags. */
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* Fixed byte offsets into the NodeInfo MAD payload. */
	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr	   = dev->dev->caps.max_wqes;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_HCA;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

/* Report whether a port runs native IB or Ethernet (IBoE/RoCE). */
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask & (1 << (port_num - 1)) ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

/*
 * Decode a PortInfo MAD (already fetched by the caller) into *props for
 * an InfiniBand link-layer port.  Offsets are into the PortInfo payload.
 */
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props,
			      struct ib_smp *out_mad)
{
	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	return 0;
}

/*
 * Map IB port state to a PortPhysicalState value.
 * NOTE(review): 5/3 appear to be LinkUp/Disabled per the IB spec —
 * confirm against IBTA PortInfo definitions.
 */
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

/*
 * Fill *props for an Ethernet (IBoE) port: most attributes are fixed
 * constants, while state/MTU come from the associated net_device.
 */
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props,
			       struct ib_smp *out_mad)
{
	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;

	props->active_width	= IB_WIDTH_1X;
	props->active_speed	= 4;	/* NOTE(review): presumably the QDR encoding — confirm */
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->max_mtu		= IB_MTU_2048;
	props->subnet_timeout	= 0;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= 0;
	/* Defaults used when no net_device is attached to the port. */
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);

out:
	spin_unlock(&iboe->lock);
	return 0;
}

/*
 * ib_device.query_port: fetch PortInfo via a MAD, then dispatch to the
 * IB or Ethernet decoder depending on the port's link layer.
 */
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, out_mad) :
		eth_link_query_port(ibdev, port, props, out_mad);

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

/*
 * Query one GID of an IB port: GID prefix from PortInfo, GUID part from
 * the GuidInfo record that covers @index (8 GUIDs per record).
 */
static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);		/* subnet prefix */

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);	/* GUID */

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

/* Ethernet ports: GIDs come from the software gid_table, no MAD needed. */
static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

/* ib_device.query_gid: dispatch on the port's link layer. */
static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}

/* ib_device.query_pkey: fetch the PkeyTable record covering @index. */
static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);	/* 32 pkeys per record */

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

/*
 * ib_device.modify_device: only node-description changes are supported.
 * The new description is also pushed to firmware on a best-effort basis.
 */
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock(&to_mdev(ibdev)->sm_lock);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock(&to_mdev(ibdev)->sm_lock);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

/*
 * Issue a SET_PORT firmware command updating the port capability mask
 * and optionally resetting the qkey-violation counter.  The mailbox
 * layout differs between old and new firmware command formats.
 */
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

/*
 * ib_device.modify_port: read the current capability flags, apply the
 * requested set/clear masks, and write the result back via SET_PORT.
 * Serialized by cap_mask_mutex to avoid lost updates.
 */
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

/*
 * ib_device.alloc_ucontext: allocate a per-process context with its own
 * UAR page, and report device limits to userspace via the udata blob.
 */
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

/* ib_device.dealloc_ucontext: release the UAR and free the context. */
static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}

/*
 * ib_device.mmap: map one page into userspace.
 * Page offset 0 = the context's UAR doorbell page (uncached);
 * page offset 1 = the matching blue-flame page (write-combining),
 * available only when the device supports blue flame.
 */
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}

/*
 * ib_device.alloc_pd: allocate a protection domain; for user PDs the
 * PD number is copied back to userspace.
 */
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

/* ib_device.dealloc_pd: return the PD number and free the wrapper. */
static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

/*
 * Record a multicast GID on the QP's gid_list so it can be cleaned up on
 * detach/destroy; ge->added notes whether a netdev MAC filter was added.
 */
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

/*
 * For IBoE multicast: translate the GID to an Ethernet multicast MAC
 * and add it to the port's net_device filter.  Returns 1 if the MAC was
 * added, 0 if there was nothing to do (no port or no netdev).
 */
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}

/*
 * ib_device.attach_mcast: attach the QP to a multicast group in the
 * device steering tables, then record the GID; rolled back on failure.
 */
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    MLX4_PROT_IB_IPV6);
	if (err)
		return err;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
	return err;
}

/* Find the gid_list entry matching a raw 16-byte GID, or NULL. */
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

/*
 * ib_device.detach_mcast: detach from the steering tables and undo the
 * bookkeeping done by add_gid_entry()/mlx4_ib_add_mc().
 */
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;

	err = mlx4_multicast_detach(mdev->dev,
				    &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		printk(KERN_WARNING "could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

/* Query NodeDesc and NodeInfo MADs to fill node_desc and node_guid. */
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

/* sysfs: PCI device ID of the HCA ("MT<id>"). */
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

/* sysfs: firmware version as major.minor.subminor. */
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

/* sysfs: hardware revision id. */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

/* sysfs: board id string. */
static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

/* Attributes registered on the ib_device's sysfs dir at probe time. */
static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

/*
 * Build a modified-EUI-64 interface id from the netdev MAC; the middle
 * two bytes carry the VLAN id when one is set, 0xfffe otherwise, and
 * the universal/local bit is flipped.
 */
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}

/*
 * Workqueue handler: push the queued GID table snapshot to firmware via
 * SET_PORT, then commit it to the software table and dispatch an event.
 * NOTE(review): the event raised is IB_EVENT_LID_CHANGE even though a
 * GID changed — confirm this is the intended notification.
 */
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;
	struct ib_event event;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
	if (err)
		printk(KERN_WARNING "set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		event.device = &gw->dev->ib_dev;
		event.element.port_num = gw->port;
		event.event    = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}

/*
 * Rebuild the IBoE GID table for @port from the current set of net
 * devices (the port netdev and its VLAN children), and queue a firmware
 * update if anything changed.
 * NOTE(review): the @clear parameter is never read in this body —
 * clearing appears to happen implicitly via the !hits[] sweep; confirm.
 */
static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int free;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			/* link-local prefix + EUI-48-derived interface id */
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			free = -1;
			for (i = 0; i < 128; ++i) {
				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i],
					    &zgid, sizeof zgid))
					free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				/* slot 0 is reserved for the real device's GID */
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0], &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0], &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (free >= 0) {
					dev->iboe.gid_table[port - 1][free] = gid;
					hits[free] = 1;
					++need_update;
				}
			}
		}
	}
	rcu_read_unlock();

	/* Drop any table entries that no longer correspond to a netdev. */
	for (i = 0; i < 128; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}

/* React to netdev state changes on a port already tracked in iboe. */
static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

/* A netdev was bound to the port: populate its GIDs. */
static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

/* The port's netdev went away: clear its GIDs. */
static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}

/*
 * Netdevice notifier: keep iboe.netdevs[] in sync with the Ethernet
 * devices backing each IBoE port and propagate up/down/address events.
 */
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	/* The event device may be a port netdev or one of its VLANs. */
	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}

/*
 * mlx4_interface.add: probe callback.  Allocates the ib_device, sets up
 * PD/UAR resources, fills in all the verbs entry points, registers the
 * device, MAD handling, the netdev notifier and the sysfs attributes.
 * Returns the new device pointer, or NULL on any failure (unwinding in
 * reverse order through the err_* labels).
 */
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i;
	int err;
	struct mlx4_ib_iboe *iboe;

	printk_once(KERN_INFO "%s", mlx4_ib_version);

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt     = ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_map;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_reg;
	}

	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[i]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		printk(KERN_WARNING "failure unregistering notifier\n");
	flush_workqueue(wq);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

/* mlx4_interface.remove: tear down everything mlx4_ib_add() created. */
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			printk(KERN_WARNING "failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

/*
 * mlx4_interface.event: translate low-level device events into ib_events
 * delivered to consumers; a catastrophic error also deactivates the
 * device so no new ucontexts can be created.
 */
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);

	if (port > ibdev->num_ports)
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};

/* Module init: create the shared workqueue and register with mlx4_core. */
static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}

/* Module exit: unregister from mlx4_core and destroy the workqueue. */
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);
gpl-2.0
usb-bullhead-ubuntu-touch/kernel_msm
arch/mips/math-emu/dp_sub.c
2527
4950
/* IEEE754 floating point arithmetic
 * double precision: common utilities
 */
/*
 * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 *
 * ########################################################################
 *
 *  This program is free software; you can distribute it and/or modify it
 *  under the terms of the GNU General Public License (Version 2) as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 */

#include "ieee754dp.h"

/*
 * Software IEEE754 double-precision subtraction: returns x - y.
 *
 * The leading switch dispatches on the class pair (SNAN/QNAN/ZERO/NORM/
 * DNORM/INF) of both operands and handles every special-value combination
 * directly; only NORM/DNORM pairs fall through to the arithmetic path,
 * which flips y's sign and then performs an addition with guard, round
 * and sticky bits (the three extra low-order bits shifted in below).
 * Working variables xs/xe/xm (sign/exponent/mantissa) come from the
 * COMPXDP/EXPLODEXDP macro family in ieee754dp.h.
 */
ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
{
	COMPXDP;
	COMPYDP;

	EXPLODEXDP;
	EXPLODEYDP;

	CLEARCX;

	FLUSHXDP;
	FLUSHYDP;

	switch (CLPAIR(xc, yc)) {
	/* Any signaling NaN operand raises invalid-operation. */
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
		SETCX(IEEE754_INVALID_OPERATION);
		return ieee754dp_nanxcpt(ieee754dp_indef(), "sub", x, y);

	/* A quiet NaN operand propagates unchanged. */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
		return y;

	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
		return x;

	/* Infinity handling */
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
		/* inf - (-inf) = inf; inf - inf is invalid */
		if (xs != ys)
			return x;
		SETCX(IEEE754_INVALID_OPERATION);
		return ieee754dp_xcpt(ieee754dp_indef(), "sub", x, y);

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
		/* finite - inf = inf with flipped sign */
		return ieee754dp_inf(ys ^ 1);

	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
		return x;

	/* Zero handling */
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
		if (xs != ys)
			return x;
		else
			/* 0 - 0 is -0 only under round-to-minus-infinity */
			return ieee754dp_zero(ieee754_csr.rm ==
					      IEEE754_RD);

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
		return x;

	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
		/* quick fix up */
		DPSIGN(y) ^= 1;
		return y;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
		DPDNORMX;
		/* FALL THROUGH */

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
		/* normalize ym,ye */
		DPDNORMY;
		break;

	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
		/* normalize xm,xe */
		DPDNORMX;
		break;

	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
		break;
	}
	/* flip sign of y and handle as add */
	ys ^= 1;

	assert(xm & DP_HIDDEN_BIT);
	assert(ym & DP_HIDDEN_BIT);

	/* provide guard, round and sticky bit space */
	xm <<= 3;
	ym <<= 3;

	if (xe > ye) {
		/* have to shift y fraction right to align */
		int s = xe - ye;
		ym = XDPSRS(ym, s);
		ye += s;
	} else if (ye > xe) {
		/* have to shift x fraction right to align */
		int s = ye - xe;
		xm = XDPSRS(xm, s);
		xe += s;
	}
	assert(xe == ye);
	assert(xe <= DP_EMAX);

	if (xs == ys) {
		/* generate 28 bit result of adding two 27 bit numbers */
		xm = xm + ym;
		xe = xe;	/* no-op, kept for symmetry with the branches below */
		xs = xs;	/* no-op */

		if (xm >> (DP_MBITS + 1 + 3)) {	/* carry out */
			xm = XDPSRS1(xm);	/* shift preserving sticky */
			xe++;
		}
	} else {
		/* effective subtraction: result takes the larger operand's sign */
		if (xm >= ym) {
			xm = xm - ym;
			xe = xe;	/* no-op */
			xs = xs;	/* no-op */
		} else {
			xm = ym - xm;
			xe = xe;	/* no-op */
			xs = ys;
		}
		if (xm == 0) {
			if (ieee754_csr.rm == IEEE754_RD)
				return ieee754dp_zero(1);	/* round negative inf. => sign = -1 */
			else
				return ieee754dp_zero(0);	/* other round modes => sign = 1 */
		}
		/* normalize to rounding precision */
		while ((xm >> (DP_MBITS + 3)) == 0) {
			xm <<= 1;
			xe--;
		}
	}
	DPNORMRET2(xs, xe, xm, "sub", x, y);
}
gpl-2.0
ShikharArvind/android_kernel_xolo_q1100
drivers/iommu/amd_iommu_init.c
2783
43922
/* * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. * Author: Joerg Roedel <joerg.roedel@amd.com> * Leo Duran <leo.duran@amd.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/pci.h> #include <linux/acpi.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/syscore_ops.h> #include <linux/interrupt.h> #include <linux/msi.h> #include <linux/amd-iommu.h> #include <linux/export.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/x86_init.h> #include <asm/iommu_table.h> #include "amd_iommu_proto.h" #include "amd_iommu_types.h" /* * definitions for the ACPI scanning code */ #define IVRS_HEADER_LENGTH 48 #define ACPI_IVHD_TYPE 0x10 #define ACPI_IVMD_TYPE_ALL 0x20 #define ACPI_IVMD_TYPE 0x21 #define ACPI_IVMD_TYPE_RANGE 0x22 #define IVHD_DEV_ALL 0x01 #define IVHD_DEV_SELECT 0x02 #define IVHD_DEV_SELECT_RANGE_START 0x03 #define IVHD_DEV_RANGE_END 0x04 #define IVHD_DEV_ALIAS 0x42 #define IVHD_DEV_ALIAS_RANGE 0x43 #define IVHD_DEV_EXT_SELECT 0x46 #define IVHD_DEV_EXT_SELECT_RANGE 0x47 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 #define IVHD_FLAG_PASSPW_EN_MASK 0x02 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 #define IVHD_FLAG_ISOC_EN_MASK 0x08 #define IVMD_FLAG_EXCL_RANGE 0x08 #define IVMD_FLAG_UNITY_MAP 0x01 #define ACPI_DEVFLAG_INITPASS 0x01 #define ACPI_DEVFLAG_EXTINT 0x02 #define ACPI_DEVFLAG_NMI 0x04 #define 
ACPI_DEVFLAG_SYSMGT1		0x10
#define ACPI_DEVFLAG_SYSMGT2		0x20
#define ACPI_DEVFLAG_LINT0		0x40
#define ACPI_DEVFLAG_LINT1		0x80
#define ACPI_DEVFLAG_ATSDIS		0x10000000

/*
 * ACPI table definitions
 *
 * These data structures are laid over the table to parse the important values
 * out of it.
 */

/*
 * structure describing one IOMMU in the ACPI table. Typically followed by one
 * or more ivhd_entrys.
 */
struct ivhd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 cap_ptr;
	u64 mmio_phys;
	u16 pci_seg;
	u16 info;
	u32 reserved;
} __attribute__((packed));

/*
 * A device entry describing which devices a specific IOMMU translates and
 * which requestor ids they use.
 */
struct ivhd_entry {
	u8 type;
	u16 devid;
	u8 flags;
	u32 ext;
} __attribute__((packed));

/*
 * An AMD IOMMU memory definition structure. It defines things like exclusion
 * ranges for devices and regions that should be unity mapped.
 */
struct ivmd_header {
	u8 type;
	u8 flags;
	u16 length;
	u16 devid;
	u16 aux;
	u64 resv;
	u64 range_start;
	u64 range_length;
} __attribute__((packed));

bool amd_iommu_dump;

static int __initdata amd_iommu_detected;
static bool __initdata amd_iommu_disabled;

u16 amd_iommu_last_bdf;			/* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */

/* Array to assign indices to IOMMUs*/
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;

/* IOMMUs have a non-present cache? */
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;

u32 amd_iommu_max_pasids __read_mostly = ~0;

bool amd_iommu_v2_present __read_mostly;

bool amd_iommu_force_isolation __read_mostly;

/*
 * The ACPI table parsing functions set this variable on an error
 */
static int __initdata amd_iommu_init_err;

/*
 * List of protection domains - used during resume
 */
LIST_HEAD(amd_iommu_pd_list);
spinlock_t amd_iommu_pd_lock;

/*
 * Pointer to the device table which is shared by all AMD IOMMUs
 * it is indexed by the PCI device id or the HT unit id and contains
 * information about the domain the device belongs to as well as the
 * page table root pointer.
 */
struct dev_table_entry *amd_iommu_dev_table;

/*
 * The alias table is a driver specific data structure which contains the
 * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
 * More than one device can share the same requestor id.
 */
u16 *amd_iommu_alias_table;

/*
 * The rlookup table is used to find the IOMMU which is responsible
 * for a specific device. It is also indexed by the PCI device id.
 */
struct amd_iommu **amd_iommu_rlookup_table;

/*
 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
 * to know which ones are already in use.
 */
unsigned long *amd_iommu_pd_alloc_bitmap;

static u32 dev_table_size;	/* size of the device table */
static u32 alias_table_size;	/* size of the alias table */
static u32 rlookup_table_size;	/* size of the rlookup table */

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static int amd_iommu_enable_interrupts(void);

/* Track the highest PCI device id seen so far during table parsing. */
static inline void update_last_devid(u16 devid)
{
	if (devid > amd_iommu_last_bdf)
		amd_iommu_last_bdf = devid;
}

/*
 * Page-aligned size (in bytes) of a per-device table with entries of
 * entry_size bytes, covering device ids 0..amd_iommu_last_bdf.
 */
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

/* Access to l1 and l2 indexed register spaces */

/* Indirect read via PCI config offsets 0xf8 (index) / 0xfc (data). */
static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
	pci_read_config_dword(iommu->dev, 0xfc, &val);
	return val;
}

/* Indirect write; bit 31 of the index dword is the write-enable strobe. */
static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
	pci_write_config_dword(iommu->dev, 0xfc, val);
	pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
}

/* Indirect read via PCI config offsets 0xf0 (index) / 0xf4 (data). */
static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
{
	u32 val;

	pci_write_config_dword(iommu->dev, 0xf0, address);
	pci_read_config_dword(iommu->dev, 0xf4, &val);
	return val;
}

/* Indirect write; bit 8 of the index dword is the write-enable strobe. */
static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
{
	pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
	pci_write_config_dword(iommu->dev, 0xf4, val);
}

/****************************************************************************
 *
 * AMD IOMMU MMIO register space handling functions
 *
 * These functions are used to program the IOMMU device registers in
 * MMIO space required for that driver.
 *
 ****************************************************************************/

/*
 * This function sets the exclusion range in the IOMMU.
 DMA accesses to the
 * exclusion range are passed through untranslated
 */
static void iommu_set_exclusion_range(struct amd_iommu *iommu)
{
	u64 start = iommu->exclusion_start & PAGE_MASK;
	u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
	u64 entry;

	/* no exclusion range configured for this IOMMU */
	if (!iommu->exclusion_start)
		return;

	entry = start | MMIO_EXCL_ENABLE_MASK;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
			&entry, sizeof(entry));

	entry = limit;
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
			&entry, sizeof(entry));
}

/* Programs the physical address of the device table into the IOMMU hardware */
static void iommu_set_device_table(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->mmio_base == NULL);

	entry = virt_to_phys(amd_iommu_dev_table);
	/* low bits encode the table size in 4K units, minus one */
	entry |= (dev_table_size >> 12) - 1;
	memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
			&entry, sizeof(entry));
}

/* Generic functions to enable/disable certain features of the IOMMU. */
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~(1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Program the invalidation timeout field of the control register. */
static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
{
	u32 ctrl;

	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl &= ~CTRL_INV_TO_MASK;
	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}

/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
	static const char * const feat_str[] = {
		"PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
		"IA", "GA", "HE", "PC", NULL
	};
	int i;

	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
	       dev_name(&iommu->dev->dev), iommu->cap_ptr);

	/* print the supported extended feature bits, if any */
	if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
		printk(KERN_CONT " extended features: ");
		for (i = 0; feat_str[i]; ++i)
			if (iommu_feature(iommu, (1ULL << i)))
				printk(KERN_CONT " %s", feat_str[i]);
	}
	printk(KERN_CONT "\n");

	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}

static void iommu_disable(struct amd_iommu *iommu)
{
	/* Disable command buffer */
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	/* Disable event logging and event interrupts */
	iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
	iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);

	/* Disable IOMMU hardware itself */
	iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
}

/*
 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
 * the system has one.
 */
static u8 * __init iommu_map_mmio_space(u64 address)
{
	if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
		pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
			address);
		pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
		return NULL;
	}

	return ioremap_nocache(address, MMIO_REGION_LENGTH);
}

static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
{
	if (iommu->mmio_base)
		iounmap(iommu->mmio_base);
	release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
}

/****************************************************************************
 *
 * The functions below belong to the first pass of AMD IOMMU ACPI table
 * parsing. In this pass we try to find out the highest device id this
 * code has to handle. Upon this information the size of the shared data
 * structures is determined later.
 *
 ****************************************************************************/

/*
 * This function calculates the length of a given IVHD entry
 */
static inline int ivhd_entry_length(u8 *ivhd)
{
	/* the top two bits of the type byte encode the entry size */
	return 0x04 << (*ivhd >> 6);
}

/*
 * This function reads the last device id the IOMMU has to handle from the PCI
 * capability header for this IOMMU
 */
static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
{
	u32 cap;

	cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
	update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));

	return 0;
}

/*
 * After reading the highest device id from the IOMMU PCI capability header
 * this function looks if there is a higher device id defined in the ACPI table
 */
static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
{
	u8 *p = (void *)h, *end = (void *)h;
	struct ivhd_entry *dev;

	p += sizeof(*h);
	end += h->length;

	find_last_devid_on_pci(PCI_BUS(h->devid),
			PCI_SLOT(h->devid),
			PCI_FUNC(h->devid),
			h->cap_ptr);

	while (p < end) {
		dev = (struct ivhd_entry *)p;
		switch (dev->type) {
		case IVHD_DEV_SELECT:
		case IVHD_DEV_RANGE_END:
		case IVHD_DEV_ALIAS:
		case IVHD_DEV_EXT_SELECT:
			/* all the above subfield types refer to device ids */
			update_last_devid(dev->devid);
			break;
		default:
			break;
		}
		p += ivhd_entry_length(p);
	}

	WARN_ON(p != end);

	return 0;
}

/*
 * Iterate over all IVHD entries in the ACPI table and find the highest device
 * id which we need to handle. This is the first of three functions which parse
 * the ACPI table. So we check the checksum here.
 */
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		amd_iommu_init_err = -ENODEV;
		return 0;
	}

	p += IVRS_HEADER_LENGTH;

	end += table->length;
	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (h->type) {
		case ACPI_IVHD_TYPE:
			find_last_devid_from_ivhd(h);
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions belong to the code path which parses the ACPI table
 * the second time. In this ACPI parsing iteration we allocate IOMMU specific
 * data structures, initialize the device/alias/rlookup table and also
 * basically initialize the hardware.
 *
 ****************************************************************************/

/*
 * Allocates the command buffer. This buffer is per AMD IOMMU. We can
 * write commands to that buffer later and the IOMMU will execute them
 * asynchronously
 */
static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
{
	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(CMD_BUFFER_SIZE));

	if (cmd_buf == NULL)
		return NULL;

	/* flag the buffer as not yet programmed into the hardware */
	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	return cmd_buf;
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
 */
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);

	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
}

/*
 * This function writes the command buffer address to the hardware and
 * enables it.
 */
static void iommu_enable_command_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->cmd_buf == NULL);

	entry = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));

	amd_iommu_reset_cmd_buffer(iommu);
	/* buffer is now live in hardware */
	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
}

static void __init free_command_buffer(struct amd_iommu *iommu)
{
	/* mask off the status flag to recover the real allocation size */
	free_pages((unsigned long)iommu->cmd_buf,
		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
}

/* allocates the memory where the IOMMU will log its events to */
static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
{
	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(EVT_BUFFER_SIZE));

	if (iommu->evt_buf == NULL)
		return NULL;

	iommu->evt_buf_size = EVT_BUFFER_SIZE;

	return iommu->evt_buf;
}

/* Program the event buffer address/size into the hardware and enable it. */
static void iommu_enable_event_buffer(struct amd_iommu *iommu)
{
	u64 entry;

	BUG_ON(iommu->evt_buf == NULL);

	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;

	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

static void __init free_event_buffer(struct amd_iommu *iommu)
{
	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}

/* allocates the memory where the IOMMU will log its PPR entries to */
static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
{
	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						get_order(PPR_LOG_SIZE));

	if (iommu->ppr_log == NULL)
		return NULL;

	return iommu->ppr_log;
}

/* Program the PPR log address/size into the hardware and enable it. */
static void iommu_enable_ppr_log(struct amd_iommu *iommu)
{
	u64 entry;

	if (iommu->ppr_log == NULL)
		return;

	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;

	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
		    &entry, sizeof(entry));

	/* set head and tail to zero manually */
	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
	iommu_feature_enable(iommu, CONTROL_PPR_EN);
}

static void __init free_ppr_log(struct amd_iommu *iommu)
{
	if (iommu->ppr_log == NULL)
		return;

	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
}

/* Enable guest translation if the hardware supports it. */
static void iommu_enable_gt(struct amd_iommu *iommu)
{
	if (!iommu_feature(iommu, FEATURE_GT))
		return;

	iommu_feature_enable(iommu, CONTROL_GT_EN);
}

/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
	/* bit index selects one of the 64-bit data words, then the bit in it */
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

/* reads a specific bit from the device table entry (0 or 1). */
static int get_dev_entry_bit(u16 devid, u8 bit)
{
	int i = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}

void amd_iommu_apply_erratum_63(u16 devid)
{
	int sysmgt;

	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);

	/* erratum 63: SysMgt == 01b requires the IW bit to be set */
	if (sysmgt == 0x01)
		set_dev_entry_bit(devid, DEV_ENTRY_IW);
}

/* Writes the specific IOMMU for a device into the rlookup table */
static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
{
	amd_iommu_rlookup_table[devid] = iommu;
}

/*
 * This function takes the device specific flags read from the ACPI
 * table and sets up the device table entry with that information
 */
static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
					   u16 devid, u32 flags, u32 ext_flags)
{
	if (flags & ACPI_DEVFLAG_INITPASS)
		set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
	if (flags & ACPI_DEVFLAG_EXTINT)
		set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
	if (flags & ACPI_DEVFLAG_NMI)
		set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
	if (flags & ACPI_DEVFLAG_SYSMGT1)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
	if (flags & ACPI_DEVFLAG_SYSMGT2)
		set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
	if (flags & ACPI_DEVFLAG_LINT0)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
	if (flags & ACPI_DEVFLAG_LINT1)
		set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);

	amd_iommu_apply_erratum_63(devid);

	set_iommu_for_device(iommu, devid);
}

/*
 * Reads the device exclusion range from ACPI and initialize IOMMU with
 * it
 */
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
		return;

	if (iommu) {
		/*
		 * We only can configure exclusion ranges per IOMMU, not
		 * per device. But we can enable the exclusion range per
		 * device. This is done here
		 */
		set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
		iommu->exclusion_start = m->range_start;
		iommu->exclusion_length = m->range_length;
	}
}

/*
 * This function reads some important data from the IOMMU PCI space and
 * initializes the driver data structure with it. It reads the hardware
 * capabilities and the first/last device entries
 */
static void __init init_iommu_from_pci(struct amd_iommu *iommu)
{
	int cap_ptr = iommu->cap_ptr;
	u32 range, misc, low, high;
	int i, j;

	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
			      &iommu->cap);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
			      &misc);

	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
	iommu->evt_msi_num = MMIO_MSI_NUM(misc);

	if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
		amd_iommu_iotlb_sup = false;

	/* read extended feature bits */
	low  = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
	high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);

	iommu->features = ((u64)high << 32) | low;

	if (iommu_feature(iommu, FEATURE_GT)) {
		int glxval;
		u32 pasids;
		u64 shift;

		/* supported PASID count is 2^(PASID field) */
		shift   = iommu->features & FEATURE_PASID_MASK;
		shift >>= FEATURE_PASID_SHIFT;
		pasids  = (1 << shift);

		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);

		glxval
			= iommu->features & FEATURE_GLXVAL_MASK;
		glxval >>= FEATURE_GLXVAL_SHIFT;

		/* track the minimum GLX value supported by all IOMMUs */
		if (amd_iommu_max_glx_val == -1)
			amd_iommu_max_glx_val = glxval;
		else
			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
	}

	/* IOMMUv2 needs both guest translation and PPR support */
	if (iommu_feature(iommu, FEATURE_GT) &&
	    iommu_feature(iommu, FEATURE_PPR)) {
		iommu->is_iommu_v2   = true;
		amd_iommu_v2_present = true;
	}

	if (!is_rd890_iommu(iommu->dev))
		return;

	/*
	 * Some rd890 systems may not be fully reconfigured by the BIOS, so
	 * it's necessary for us to store this information so it can be
	 * reprogrammed on resume
	 */
	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
			      &iommu->stored_addr_lo);
	pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
			      &iommu->stored_addr_hi);

	/* Low bit locks writes to configuration space */
	iommu->stored_addr_lo &= ~1;

	for (i = 0; i < 6; i++)
		for (j = 0; j < 0x12; j++)
			iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);

	for (i = 0; i < 0x83; i++)
		iommu->stored_l2[i] = iommu_read_l2(iommu, i);
}

/*
 * Takes a pointer to an AMD IOMMU entry in the ACPI table and
 * initializes the hardware and our data structures with it.
 */
static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
					struct ivhd_header *h)
{
	u8 *p = (u8 *)h;
	u8 *end = p, flags = 0;
	u16 devid = 0, devid_start = 0, devid_to = 0;
	u32 dev_i, ext_flags = 0;
	bool alias = false;
	struct ivhd_entry *e;

	/*
	 * First save the recommended feature enable bits from ACPI
	 */
	iommu->acpi_flags = h->flags;

	/*
	 * Done. Now parse the device entries
	 */
	p += sizeof(struct ivhd_header);
	end += h->length;

	while (p < end) {
		e = (struct ivhd_entry *)p;
		switch (e->type) {
		case IVHD_DEV_ALL:
			DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
				    " last device %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(iommu->first_device),
				    PCI_SLOT(iommu->first_device),
				    PCI_FUNC(iommu->first_device),
				    PCI_BUS(iommu->last_device),
				    PCI_SLOT(iommu->last_device),
				    PCI_FUNC(iommu->last_device),
				    e->flags);

			for (dev_i = iommu->first_device;
					dev_i <= iommu->last_device; ++dev_i)
				set_dev_entry_from_acpi(iommu, dev_i,
							e->flags, 0);
			break;
		case IVHD_DEV_SELECT:
			DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
			break;
		case IVHD_DEV_SELECT_RANGE_START:
			/* range entries are applied at the DEV_RANGE_END entry */
			DUMP_printk(" DEV_SELECT_RANGE_START\t "
				    "devid: %02x:%02x.%x flags: %02x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = 0;
			alias = false;
			break;
		case IVHD_DEV_ALIAS:
			DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
				    "flags: %02x devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid = e->devid;
			devid_to = e->ext >> 8;
			set_dev_entry_from_acpi(iommu, devid   , e->flags, 0);
			set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
			amd_iommu_alias_table[devid] = devid_to;
			break;
		case IVHD_DEV_ALIAS_RANGE:
			DUMP_printk(" DEV_ALIAS_RANGE\t\t "
				    "devid: %02x:%02x.%x flags: %02x "
				    "devid_to: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags,
				    PCI_BUS(e->ext >> 8),
				    PCI_SLOT(e->ext >> 8),
				    PCI_FUNC(e->ext >> 8));

			devid_start = e->devid;
			flags = e->flags;
			devid_to = e->ext >> 8;
			ext_flags = 0;
			alias = true;
			break;
		case IVHD_DEV_EXT_SELECT:
			DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
				    "flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid = e->devid;
			set_dev_entry_from_acpi(iommu, devid, e->flags,
						e->ext);
			break;
		case IVHD_DEV_EXT_SELECT_RANGE:
			DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
				    "%02x:%02x.%x flags: %02x ext: %08x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid),
				    e->flags, e->ext);

			devid_start = e->devid;
			flags = e->flags;
			ext_flags = e->ext;
			alias = false;
			break;
		case IVHD_DEV_RANGE_END:
			/* apply the pending range state to every devid in it */
			DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
				    PCI_BUS(e->devid),
				    PCI_SLOT(e->devid),
				    PCI_FUNC(e->devid));

			devid = e->devid;
			for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
				if (alias) {
					amd_iommu_alias_table[dev_i] = devid_to;
					set_dev_entry_from_acpi(iommu,
						devid_to, flags, ext_flags);
				}
				set_dev_entry_from_acpi(iommu, dev_i,
							flags, ext_flags);
			}
			break;
		default:
			break;
		}

		p += ivhd_entry_length(p);
	}
}

/* Initializes the device->iommu mapping for the driver */
static int __init init_iommu_devices(struct amd_iommu *iommu)
{
	u32 i;

	for (i = iommu->first_device; i <= iommu->last_device; ++i)
		set_iommu_for_device(iommu, i);

	return 0;
}

/* Release all per-IOMMU resources (buffers, logs, MMIO mapping). */
static void __init free_iommu_one(struct amd_iommu *iommu)
{
	free_command_buffer(iommu);
	free_event_buffer(iommu);
	free_ppr_log(iommu);
	iommu_unmap_mmio_space(iommu);
}

static void __init free_iommu_all(void)
{
	struct amd_iommu *iommu, *next;

	for_each_iommu_safe(iommu, next) {
		list_del(&iommu->list);
		free_iommu_one(iommu);
		kfree(iommu);
	}
}

/*
 * This function glues the initialization function for one IOMMU
 * together and also allocates the command buffer and programs the
 * hardware. It does NOT enable the IOMMU. This is done afterwards.
 */
static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	spin_lock_init(&iommu->lock);

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
	if (!iommu->dev)
		return 1;

	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;
	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
	if (!iommu->mmio_base)
		return -ENOMEM;

	iommu->cmd_buf = alloc_command_buffer(iommu);
	if (!iommu->cmd_buf)
		return -ENOMEM;

	iommu->evt_buf = alloc_event_buffer(iommu);
	if (!iommu->evt_buf)
		return -ENOMEM;

	iommu->int_enabled = false;

	init_iommu_from_pci(iommu);
	init_iommu_from_acpi(iommu, h);
	init_iommu_devices(iommu);

	/* the PPR log is only needed on IOMMUv2-capable hardware */
	if (iommu_feature(iommu, FEATURE_PPR)) {
		iommu->ppr_log = alloc_ppr_log(iommu);
		if (!iommu->ppr_log)
			return -ENOMEM;
	}

	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
		amd_iommu_np_cache = true;

	return pci_enable_device(iommu->dev);
}

/*
 * Iterates over all IOMMU entries in the ACPI table, allocates the
 * IOMMU structure and initializes it with init_iommu_one()
 */
static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	end += table->length;
	p += IVRS_HEADER_LENGTH;

	while (p < end) {
		h = (struct ivhd_header *)p;
		switch (*p) {
		case ACPI_IVHD_TYPE:

			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
				    "seg: %d flags: %01x info %04x\n",
				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
				    PCI_FUNC(h->devid), h->cap_ptr,
				    h->pci_seg, h->flags, h->info);
			DUMP_printk(" mmio-addr: %016llx\n",
				    h->mmio_phys);

			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
			if (iommu == NULL) {
				/* errors are reported via amd_iommu_init_err */
				amd_iommu_init_err = -ENOMEM;
				return 0;
			}

			ret = init_iommu_one(iommu, h);
			if (ret) {
				amd_iommu_init_err = ret;
				return 0;
			}
			break;
		default:
			break;
		}
		p += h->length;
	}
	WARN_ON(p != end);

	return 0;
}

/****************************************************************************
 *
 * The following functions initialize the MSI interrupts for all IOMMUs
 * in the system. Its a bit challenging because there could be multiple
 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
 * pci_dev.
 *
 ****************************************************************************/

static int iommu_setup_msi(struct amd_iommu *iommu)
{
	int r;

	r = pci_enable_msi(iommu->dev);
	if (r)
		return r;

	r = request_threaded_irq(iommu->dev->irq,
				 amd_iommu_int_handler,
				 amd_iommu_int_thread,
				 0, "AMD-Vi",
				 iommu->dev);

	if (r) {
		/* roll back MSI enablement if we can't get the irq */
		pci_disable_msi(iommu->dev);
		return r;
	}

	iommu->int_enabled = true;

	return 0;
}

static int iommu_init_msi(struct amd_iommu *iommu)
{
	int ret;

	if (iommu->int_enabled)
		goto enable_faults;

	if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
		ret = iommu_setup_msi(iommu);
	else
		ret = -ENODEV;

	if (ret)
		return ret;

enable_faults:
	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);

	if (iommu->ppr_log != NULL)
		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the third pass of parsing the ACPI
 * table. In this last pass the memory mapping requirements are
 * gathered (like exclusion and unity mapping ranges).
 *
 ****************************************************************************/

static void __init free_unity_maps(void)
{
	struct unity_map_entry *entry, *next;

	list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

/* called when we find an exclusion range definition in ACPI */
static int __init init_exclusion_range(struct ivmd_header *m)
{
	int i;

	switch (m->type) {
	case ACPI_IVMD_TYPE:
		set_device_exclusion_range(m->devid, m);
		break;
	case ACPI_IVMD_TYPE_ALL:
		for (i = 0; i <= amd_iommu_last_bdf; ++i)
			set_device_exclusion_range(i, m);
		break;
	case ACPI_IVMD_TYPE_RANGE:
		for (i = m->devid; i <= m->aux; ++i)
			set_device_exclusion_range(i, m);
		break;
	default:
		break;
	}

	return 0;
}

/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
	struct unity_map_entry *e = 0;
	char *s;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	switch (m->type) {
	default:
		kfree(e);
		return 0;
	case ACPI_IVMD_TYPE:
		s = "IVMD_TYPEi\t\t\t";
		e->devid_start = e->devid_end = m->devid;
		break;
	case ACPI_IVMD_TYPE_ALL:
		s = "IVMD_TYPE_ALL\t\t";
		e->devid_start = 0;
		e->devid_end = amd_iommu_last_bdf;
		break;
	case ACPI_IVMD_TYPE_RANGE:
		s = "IVMD_TYPE_RANGE\t\t";
		e->devid_start = m->devid;
		e->devid_end = m->aux;
		break;
	}
	e->address_start = PAGE_ALIGN(m->range_start);
	e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
	/* drop the exclusion-range bit; remaining flags are the protection */
	e->prot = m->flags >> 1;

	DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
		    " range_start: %016llx range_end: %016llx flags: %x\n", s,
		    PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
		    PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
		    PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
		    e->address_start, e->address_end, m->flags);

	list_add_tail(&e->list, &amd_iommu_unity_map);

	return 0;
}

/* iterates over all memory definitions we find in the ACPI table */
/* NOTE(review): this function is truncated at the end of the chunk. */
static int __init init_memory_definitions(struct acpi_table_header *table)
{
	u8 *p = (u8
*)table, *end = (u8 *)table; struct ivmd_header *m; end += table->length; p += IVRS_HEADER_LENGTH; while (p < end) { m = (struct ivmd_header *)p; if (m->flags & IVMD_FLAG_EXCL_RANGE) init_exclusion_range(m); else if (m->flags & IVMD_FLAG_UNITY_MAP) init_unity_map_range(m); p += m->length; } return 0; } /* * Init the device table to not allow DMA access for devices and * suppress all page faults */ static void init_device_table(void) { u32 devid; for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { set_dev_entry_bit(devid, DEV_ENTRY_VALID); set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); } } static void iommu_init_flags(struct amd_iommu *iommu) { iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : iommu_feature_disable(iommu, CONTROL_PASSPW_EN); iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? iommu_feature_enable(iommu, CONTROL_ISOC_EN) : iommu_feature_disable(iommu, CONTROL_ISOC_EN); /* * make IOMMU memory accesses cache coherent */ iommu_feature_enable(iommu, CONTROL_COHERENT_EN); /* Set IOTLB invalidation timeout to 1s */ iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); } static void iommu_apply_resume_quirks(struct amd_iommu *iommu) { int i, j; u32 ioc_feature_control; struct pci_dev *pdev = NULL; /* RD890 BIOSes may not have completely reconfigured the iommu */ if (!is_rd890_iommu(iommu->dev)) return; /* * First, we need to ensure that the iommu is enabled. 
This is * controlled by a register in the northbridge */ pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); if (!pdev) return; /* Select Northbridge indirect register 0x75 and enable writing */ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); pci_read_config_dword(pdev, 0x64, &ioc_feature_control); /* Enable the iommu */ if (!(ioc_feature_control & 0x1)) pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); pci_dev_put(pdev); /* Restore the iommu BAR */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo); pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, iommu->stored_addr_hi); /* Restore the l1 indirect regs for each of the 6 l1s */ for (i = 0; i < 6; i++) for (j = 0; j < 0x12; j++) iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); /* Restore the l2 indirect regs */ for (i = 0; i < 0x83; i++) iommu_write_l2(iommu, i, iommu->stored_l2[i]); /* Lock PCI setup registers */ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, iommu->stored_addr_lo | 1); } /* * This function finally enables all IOMMUs found in the system after * they have been initialized */ static void enable_iommus(void) { struct amd_iommu *iommu; for_each_iommu(iommu) { iommu_disable(iommu); iommu_init_flags(iommu); iommu_set_device_table(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); iommu_enable_ppr_log(iommu); iommu_enable_gt(iommu); iommu_set_exclusion_range(iommu); iommu_enable(iommu); iommu_flush_all_caches(iommu); } } static void disable_iommus(void) { struct amd_iommu *iommu; for_each_iommu(iommu) iommu_disable(iommu); } /* * Suspend/Resume support * disable suspend until real resume implemented */ static void amd_iommu_resume(void) { struct amd_iommu *iommu; for_each_iommu(iommu) iommu_apply_resume_quirks(iommu); /* re-load the hardware */ enable_iommus(); amd_iommu_enable_interrupts(); } static int amd_iommu_suspend(void) { /* disable IOMMUs to go out of the way for BIOS */ disable_iommus(); 
return 0; } static struct syscore_ops amd_iommu_syscore_ops = { .suspend = amd_iommu_suspend, .resume = amd_iommu_resume, }; static void __init free_on_init_error(void) { amd_iommu_uninit_devices(); free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, get_order(MAX_DOMAIN_ID/8)); free_pages((unsigned long)amd_iommu_rlookup_table, get_order(rlookup_table_size)); free_pages((unsigned long)amd_iommu_alias_table, get_order(alias_table_size)); free_pages((unsigned long)amd_iommu_dev_table, get_order(dev_table_size)); free_iommu_all(); free_unity_maps(); #ifdef CONFIG_GART_IOMMU /* * We failed to initialize the AMD IOMMU - try fallback to GART * if possible. */ gart_iommu_init(); #endif } /* * This is the hardware init function for AMD IOMMU in the system. * This function is called either from amd_iommu_init or from the interrupt * remapping setup code. * * This function basically parses the ACPI table for AMD IOMMU (IVRS) * three times: * * 1 pass) Find the highest PCI device id the driver has to handle. * Upon this information the size of the data structures is * determined that needs to be allocated. * * 2 pass) Initialize the data structures just allocated with the * information in the ACPI table about available AMD IOMMUs * in the system. It also maps the PCI devices in the * system to specific IOMMUs * * 3 pass) After the basic data structures are allocated and * initialized we update them with information about memory * remapping requirements parsed out of the ACPI table in * this last pass. * * After everything is set up the IOMMUs are enabled and the necessary * hotplug and suspend notifiers are registered. */ int __init amd_iommu_init_hardware(void) { int i, ret = 0; if (!amd_iommu_detected) return -ENODEV; if (amd_iommu_dev_table != NULL) { /* Hardware already initialized */ return 0; } /* * First parse ACPI tables to find the largest Bus/Dev/Func * we need to handle. 
Upon this information the shared data * structures for the IOMMUs in the system will be allocated */ if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) return -ENODEV; ret = amd_iommu_init_err; if (ret) goto out; dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); /* Device table - directly used by all IOMMUs */ ret = -ENOMEM; amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(dev_table_size)); if (amd_iommu_dev_table == NULL) goto out; /* * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the * IOMMU see for that device */ amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, get_order(alias_table_size)); if (amd_iommu_alias_table == NULL) goto free; /* IOMMU rlookup table - find the IOMMU for a specific device */ amd_iommu_rlookup_table = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(rlookup_table_size)); if (amd_iommu_rlookup_table == NULL) goto free; amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, get_order(MAX_DOMAIN_ID/8)); if (amd_iommu_pd_alloc_bitmap == NULL) goto free; /* init the device table */ init_device_table(); /* * let all alias entries point to itself */ for (i = 0; i <= amd_iommu_last_bdf; ++i) amd_iommu_alias_table[i] = i; /* * never allocate domain 0 because its used as the non-allocated and * error value placeholder */ amd_iommu_pd_alloc_bitmap[0] = 1; spin_lock_init(&amd_iommu_pd_lock); /* * now the data structures are allocated and basically initialized * start the real acpi table scan */ ret = -ENODEV; if (acpi_table_parse("IVRS", init_iommu_all) != 0) goto free; if (amd_iommu_init_err) { ret = amd_iommu_init_err; goto free; } if (acpi_table_parse("IVRS", init_memory_definitions) != 0) goto free; if (amd_iommu_init_err) { ret = amd_iommu_init_err; goto free; } ret = amd_iommu_init_devices(); if (ret) goto free; enable_iommus(); 
amd_iommu_init_notifier(); register_syscore_ops(&amd_iommu_syscore_ops); out: return ret; free: free_on_init_error(); return ret; } static int amd_iommu_enable_interrupts(void) { struct amd_iommu *iommu; int ret = 0; for_each_iommu(iommu) { ret = iommu_init_msi(iommu); if (ret) goto out; } out: return ret; } /* * This is the core init function for AMD IOMMU hardware in the system. * This function is called from the generic x86 DMA layer initialization * code. * * The function calls amd_iommu_init_hardware() to setup and enable the * IOMMU hardware if this has not happened yet. After that the driver * registers for the DMA-API and for the IOMMU-API as necessary. */ static int __init amd_iommu_init(void) { int ret = 0; ret = amd_iommu_init_hardware(); if (ret) goto out; ret = amd_iommu_enable_interrupts(); if (ret) goto free; if (iommu_pass_through) ret = amd_iommu_init_passthrough(); else ret = amd_iommu_init_dma_ops(); if (ret) goto free; amd_iommu_init_api(); if (iommu_pass_through) goto out; if (amd_iommu_unmap_flush) printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); else printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); x86_platform.iommu_shutdown = disable_iommus; out: return ret; free: disable_iommus(); free_on_init_error(); goto out; } /**************************************************************************** * * Early detect code. This code runs at IOMMU detection time in the DMA * layer. 
It just looks if there is an IVRS ACPI table to detect AMD * IOMMUs * ****************************************************************************/ static int __init early_amd_iommu_detect(struct acpi_table_header *table) { return 0; } int __init amd_iommu_detect(void) { if (no_iommu || (iommu_detected && !gart_iommu_aperture)) return -ENODEV; if (amd_iommu_disabled) return -ENODEV; if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { iommu_detected = 1; amd_iommu_detected = 1; x86_init.iommu.iommu_init = amd_iommu_init; /* Make sure ACS will be enabled */ pci_request_acs(); return 1; } return -ENODEV; } /**************************************************************************** * * Parsing functions for the AMD IOMMU specific kernel command line * options. * ****************************************************************************/ static int __init parse_amd_iommu_dump(char *str) { amd_iommu_dump = true; return 1; } static int __init parse_amd_iommu_options(char *str) { for (; *str; ++str) { if (strncmp(str, "fullflush", 9) == 0) amd_iommu_unmap_flush = true; if (strncmp(str, "off", 3) == 0) amd_iommu_disabled = true; if (strncmp(str, "force_isolation", 15) == 0) amd_iommu_force_isolation = true; } return 1; } __setup("amd_iommu_dump", parse_amd_iommu_dump); __setup("amd_iommu=", parse_amd_iommu_options); IOMMU_INIT_FINISH(amd_iommu_detect, gart_iommu_hole_init, 0, 0); bool amd_iommu_v2_supported(void) { return amd_iommu_v2_present; } EXPORT_SYMBOL(amd_iommu_v2_supported);
gpl-2.0
JustAkan/Oxygen_united_kernel-gproj-lollipop
fs/binfmt_aout.c
4831
12014
/* * linux/fs/binfmt_aout.c * * Copyright (C) 1991, 1992, 1996 Linus Torvalds */ #include <linux/module.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/a.out.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/binfmts.h> #include <linux/personality.h> #include <linux/init.h> #include <linux/coredump.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/a.out-core.h> static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs); static int load_aout_library(struct file*); static int aout_core_dump(struct coredump_params *cprm); static struct linux_binfmt aout_format = { .module = THIS_MODULE, .load_binary = load_aout_binary, .load_shlib = load_aout_library, .core_dump = aout_core_dump, .min_coredump = PAGE_SIZE }; #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) static int set_brk(unsigned long start, unsigned long end) { start = PAGE_ALIGN(start); end = PAGE_ALIGN(end); if (end > start) { unsigned long addr; addr = vm_brk(start, end - start); if (BAD_ADDR(addr)) return addr; } return 0; } /* * Routine writes a core dump image in the current directory. * Currently only a stub-function. * * Note that setuid/setgid files won't make a core-dump if the uid/gid * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable" * field, which also makes sure the core-dumps won't be recursive if the * dumping of the process results in another error.. 
*/ static int aout_core_dump(struct coredump_params *cprm) { struct file *file = cprm->file; mm_segment_t fs; int has_dumped = 0; void __user *dump_start; int dump_size; struct user dump; #ifdef __alpha__ # define START_DATA(u) ((void __user *)u.start_data) #else # define START_DATA(u) ((void __user *)((u.u_tsize << PAGE_SHIFT) + \ u.start_code)) #endif # define START_STACK(u) ((void __user *)u.start_stack) fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; current->flags |= PF_DUMPCORE; strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); dump.u_ar0 = offsetof(struct user, regs); dump.signal = cprm->signr; aout_dump_thread(cprm->regs, &dump); /* If the size of the dump file exceeds the rlimit, then see what would happen if we wrote the stack, but not the data area. */ if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit) dump.u_dsize = 0; /* Make sure we have enough room to write the stack and data areas. */ if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) dump.u_ssize = 0; /* make sure we actually have a data and stack area to dump */ set_fs(USER_DS); if (!access_ok(VERIFY_READ, START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) dump.u_dsize = 0; if (!access_ok(VERIFY_READ, START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) dump.u_ssize = 0; set_fs(KERNEL_DS); /* struct user */ if (!dump_write(file, &dump, sizeof(dump))) goto end_coredump; /* Now dump all of the user data. 
Include malloced stuff as well */ if (!dump_seek(cprm->file, PAGE_SIZE - sizeof(dump))) goto end_coredump; /* now we start writing out the user space info */ set_fs(USER_DS); /* Dump the data area */ if (dump.u_dsize != 0) { dump_start = START_DATA(dump); dump_size = dump.u_dsize << PAGE_SHIFT; if (!dump_write(file, dump_start, dump_size)) goto end_coredump; } /* Now prepare to dump the stack area */ if (dump.u_ssize != 0) { dump_start = START_STACK(dump); dump_size = dump.u_ssize << PAGE_SHIFT; if (!dump_write(file, dump_start, dump_size)) goto end_coredump; } end_coredump: set_fs(fs); return has_dumped; } /* * create_aout_tables() parses the env- and arg-strings in new user * memory and creates the pointer tables from them, and puts their * addresses on the "stack", returning the new stack pointer value. */ static unsigned long __user *create_aout_tables(char __user *p, struct linux_binprm * bprm) { char __user * __user *argv; char __user * __user *envp; unsigned long __user *sp; int argc = bprm->argc; int envc = bprm->envc; sp = (void __user *)((-(unsigned long)sizeof(char *)) & (unsigned long) p); #ifdef __alpha__ /* whee.. test-programs are so much fun. 
*/ put_user(0, --sp); put_user(0, --sp); if (bprm->loader) { put_user(0, --sp); put_user(1003, --sp); put_user(bprm->loader, --sp); put_user(1002, --sp); } put_user(bprm->exec, --sp); put_user(1001, --sp); #endif sp -= envc+1; envp = (char __user * __user *) sp; sp -= argc+1; argv = (char __user * __user *) sp; #ifndef __alpha__ put_user((unsigned long) envp,--sp); put_user((unsigned long) argv,--sp); #endif put_user(argc,--sp); current->mm->arg_start = (unsigned long) p; while (argc-->0) { char c; put_user(p,argv++); do { get_user(c,p++); } while (c); } put_user(NULL,argv); current->mm->arg_end = current->mm->env_start = (unsigned long) p; while (envc-->0) { char c; put_user(p,envp++); do { get_user(c,p++); } while (c); } put_user(NULL,envp); current->mm->env_end = (unsigned long) p; return sp; } /* * These are the functions used to load a.out style executables and shared * libraries. There is no binary dependent code anywhere else. */ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) { struct exec ex; unsigned long error; unsigned long fd_offset; unsigned long rlim; int retval; ex = *((struct exec *) bprm->buf); /* exec-header */ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC && N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) || N_TRSIZE(ex) || N_DRSIZE(ex) || i_size_read(bprm->file->f_path.dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) { return -ENOEXEC; } /* * Requires a mmap handler. This prevents people from using a.out * as part of an exploit attack against /proc-related vulnerabilities. */ if (!bprm->file->f_op || !bprm->file->f_op->mmap) return -ENOEXEC; fd_offset = N_TXTOFF(ex); /* Check initial limits. This avoids letting people circumvent * size limits imposed on them by creating programs with large * arrays in the data or bss. 
*/ rlim = rlimit(RLIMIT_DATA); if (rlim >= RLIM_INFINITY) rlim = ~0; if (ex.a_data + ex.a_bss > rlim) return -ENOMEM; /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) return retval; /* OK, This is the point of no return */ #ifdef __alpha__ SET_AOUT_PERSONALITY(bprm, ex); #else set_personality(PER_LINUX); #endif setup_new_exec(bprm); current->mm->end_code = ex.a_text + (current->mm->start_code = N_TXTADDR(ex)); current->mm->end_data = ex.a_data + (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); current->mm->free_area_cache = current->mm->mmap_base; current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); if (retval < 0) { /* Someone check-me: is this error path enough? */ send_sig(SIGKILL, current, 0); return retval; } install_exec_creds(bprm); if (N_MAGIC(ex) == OMAGIC) { unsigned long text_addr, map_size; loff_t pos; text_addr = N_TXTADDR(ex); #ifdef __alpha__ pos = fd_offset; map_size = ex.a_text+ex.a_data + PAGE_SIZE - 1; #else pos = 32; map_size = ex.a_text+ex.a_data; #endif error = vm_brk(text_addr & PAGE_MASK, map_size); if (error != (text_addr & PAGE_MASK)) { send_sig(SIGKILL, current, 0); return error; } error = bprm->file->f_op->read(bprm->file, (char __user *)text_addr, ex.a_text+ex.a_data, &pos); if ((signed long)error < 0) { send_sig(SIGKILL, current, 0); return error; } flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data); } else { if ((ex.a_text & 0xfff || ex.a_data & 0xfff) && (N_MAGIC(ex) != NMAGIC) && printk_ratelimit()) { printk(KERN_NOTICE "executable not page aligned\n"); } if ((fd_offset & ~PAGE_MASK) != 0 && printk_ratelimit()) { printk(KERN_WARNING "fd_offset is not page aligned. 
Please convert program: %s\n", bprm->file->f_path.dentry->d_name.name); } if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { loff_t pos = fd_offset; vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex), ex.a_text+ex.a_data, &pos); flush_icache_range((unsigned long) N_TXTADDR(ex), (unsigned long) N_TXTADDR(ex) + ex.a_text+ex.a_data); goto beyond_if; } error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset); if (error != N_TXTADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset + ex.a_text); if (error != N_DATADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } } beyond_if: set_binfmt(&aout_format); retval = set_brk(current->mm->start_brk, current->mm->brk); if (retval < 0) { send_sig(SIGKILL, current, 0); return retval; } current->mm->start_stack = (unsigned long) create_aout_tables((char __user *) bprm->p, bprm); #ifdef __alpha__ regs->gp = ex.a_gpvalue; #endif start_thread(regs, ex.a_entry, current->mm->start_stack); return 0; } static int load_aout_library(struct file *file) { struct inode * inode; unsigned long bss, start_addr, len; unsigned long error; int retval; struct exec ex; inode = file->f_path.dentry->d_inode; retval = -ENOEXEC; error = kernel_read(file, 0, (char *) &ex, sizeof(ex)); if (error != sizeof(ex)) goto out; /* We come in here for the regular a.out style of shared libraries */ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) || N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) || i_size_read(inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) { goto out; } /* * Requires a mmap handler. 
This prevents people from using a.out * as part of an exploit attack against /proc-related vulnerabilities. */ if (!file->f_op || !file->f_op->mmap) goto out; if (N_FLAGS(ex)) goto out; /* For QMAGIC, the starting address is 0x20 into the page. We mask this off to get the starting address for the page */ start_addr = ex.a_entry & 0xfffff000; if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) { loff_t pos = N_TXTOFF(ex); if (printk_ratelimit()) { printk(KERN_WARNING "N_TXTOFF is not page aligned. Please convert library: %s\n", file->f_path.dentry->d_name.name); } vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); file->f_op->read(file, (char __user *)start_addr, ex.a_text + ex.a_data, &pos); flush_icache_range((unsigned long) start_addr, (unsigned long) start_addr + ex.a_text + ex.a_data); retval = 0; goto out; } /* Now use mmap to map the library into memory. */ error = vm_mmap(file, start_addr, ex.a_text + ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, N_TXTOFF(ex)); retval = error; if (error != start_addr) goto out; len = PAGE_ALIGN(ex.a_text + ex.a_data); bss = ex.a_text + ex.a_data + ex.a_bss; if (bss > len) { error = vm_brk(start_addr + len, bss - len); retval = error; if (error != start_addr + len) goto out; } retval = 0; out: return retval; } static int __init init_aout_binfmt(void) { register_binfmt(&aout_format); return 0; } static void __exit exit_aout_binfmt(void) { unregister_binfmt(&aout_format); } core_initcall(init_aout_binfmt); module_exit(exit_aout_binfmt); MODULE_LICENSE("GPL");
gpl-2.0
WisniaPL/android_kernel_leeco_MT6795
drivers/staging/octeon/ethernet-xaui.c
7903
3667
/********************************************************************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2007 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. 
* Contact Cavium Networks for more information **********************************************************************/ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/ratelimit.h> #include <net/dst.h> #include <asm/octeon/octeon.h> #include "ethernet-defines.h" #include "octeon-ethernet.h" #include "ethernet-util.h" #include <asm/octeon/cvmx-helper.h> #include <asm/octeon/cvmx-gmxx-defs.h> int cvm_oct_xaui_open(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); cvmx_helper_link_info_t link_info; gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 1; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); if (!octeon_is_simulation()) { link_info = cvmx_helper_link_get(priv->port); if (!link_info.s.link_up) netif_carrier_off(dev); } return 0; } int cvm_oct_xaui_stop(struct net_device *dev) { union cvmx_gmxx_prtx_cfg gmx_cfg; struct octeon_ethernet *priv = netdev_priv(dev); int interface = INTERFACE(priv->port); int index = INDEX(priv->port); gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); gmx_cfg.s.en = 0; cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); return 0; } static void cvm_oct_xaui_poll(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); cvmx_helper_link_info_t link_info; link_info = cvmx_helper_link_get(priv->port); if (link_info.u64 == priv->link_info) return; link_info = cvmx_helper_link_autoconf(priv->port); priv->link_info = link_info.u64; /* Tell Linux */ if (link_info.s.link_up) { if (!netif_carrier_ok(dev)) netif_carrier_on(dev); if (priv->queue != -1) printk_ratelimited ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ? 
"Full" : "Half", priv->port, priv->queue); else printk_ratelimited ("%s: %u Mbps %s duplex, port %2d, POW\n", dev->name, link_info.s.speed, (link_info.s.full_duplex) ? "Full" : "Half", priv->port); } else { if (netif_carrier_ok(dev)) netif_carrier_off(dev); printk_ratelimited("%s: Link down\n", dev->name); } } int cvm_oct_xaui_init(struct net_device *dev) { struct octeon_ethernet *priv = netdev_priv(dev); cvm_oct_common_init(dev); dev->netdev_ops->ndo_stop(dev); if (!octeon_is_simulation() && priv->phydev == NULL) priv->poll = cvm_oct_xaui_poll; return 0; } void cvm_oct_xaui_uninit(struct net_device *dev) { cvm_oct_common_uninit(dev); }
gpl-2.0
chris4824/kernel_samsung_jf
drivers/gpu/drm/mga/mga_state.c
8415
29217
/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*- * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: * Jeff Hartmann <jhartmann@valinux.com> * Keith Whitwell <keith@tungstengraphics.com> * * Rewritten by: * Gareth Hughes <gareth@valinux.com> */ #include "drmP.h" #include "drm.h" #include "mga_drm.h" #include "mga_drv.h" /* ================================================================ * DMA hardware state programming functions */ static void mga_emit_clip_rect(drm_mga_private_t *dev_priv, struct drm_clip_rect *box) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; unsigned int pitch = dev_priv->front_pitch; DMA_LOCALS; BEGIN_DMA(2); /* Force reset of DWGCTL on G400 (eliminates clip disable bit). */ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl, MGA_LEN + MGA_EXEC, 0x80000000, MGA_DWGCTL, ctx->dwgctl, MGA_LEN + MGA_EXEC, 0x80000000); } DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1, MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch); ADVANCE_DMA(); } static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; DMA_LOCALS; BEGIN_DMA(3); DMA_BLOCK(MGA_DSTORG, ctx->dstorg, MGA_MACCESS, ctx->maccess, MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, MGA_FOGCOL, ctx->fogcolor, MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset); DMA_BLOCK(MGA_FCOL, ctx->fcol, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); ADVANCE_DMA(); } static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; DMA_LOCALS; BEGIN_DMA(4); DMA_BLOCK(MGA_DSTORG, ctx->dstorg, MGA_MACCESS, ctx->maccess, MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, MGA_FOGCOL, ctx->fogcolor, MGA_WFLAG, ctx->wflag, 
MGA_ZORG, dev_priv->depth_offset); DMA_BLOCK(MGA_WFLAG1, ctx->wflag, MGA_TDUALSTAGE0, ctx->tdualstage0, MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol); DMA_BLOCK(MGA_STENCIL, ctx->stencil, MGA_STENCILCTL, ctx->stencilctl, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); ADVANCE_DMA(); } static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; DMA_LOCALS; BEGIN_DMA(4); DMA_BLOCK(MGA_TEXCTL2, tex->texctl2, MGA_TEXCTL, tex->texctl, MGA_TEXFILTER, tex->texfilter, MGA_TEXBORDERCOL, tex->texbordercol); DMA_BLOCK(MGA_TEXORG, tex->texorg, MGA_TEXORG1, tex->texorg1, MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3); DMA_BLOCK(MGA_TEXORG4, tex->texorg4, MGA_TEXWIDTH, tex->texwidth, MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth); DMA_BLOCK(MGA_WR34, tex->texheight, MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000); ADVANCE_DMA(); } static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; DMA_LOCALS; /* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ /* tex->texctl, tex->texctl2); */ BEGIN_DMA(6); DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC, MGA_TEXCTL, tex->texctl, MGA_TEXFILTER, tex->texfilter, MGA_TEXBORDERCOL, tex->texbordercol); DMA_BLOCK(MGA_TEXORG, tex->texorg, MGA_TEXORG1, tex->texorg1, MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3); DMA_BLOCK(MGA_TEXORG4, tex->texorg4, MGA_TEXWIDTH, tex->texwidth, MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000); DMA_BLOCK(MGA_WR57, 0x00000000, MGA_WR53, 0x00000000, MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC); DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC, MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC, MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC, MGA_DMAPAD, 0x00000000); DMA_BLOCK(MGA_DMAPAD, 0x00000000, 
MGA_DMAPAD, 0x00000000, MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff); ADVANCE_DMA(); } static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1]; DMA_LOCALS; /* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */ /* tex->texctl, tex->texctl2); */ BEGIN_DMA(5); DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 | MGA_MAP1_ENABLE | MGA_G400_TC2_MAGIC), MGA_TEXCTL, tex->texctl, MGA_TEXFILTER, tex->texfilter, MGA_TEXBORDERCOL, tex->texbordercol); DMA_BLOCK(MGA_TEXORG, tex->texorg, MGA_TEXORG1, tex->texorg1, MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3); DMA_BLOCK(MGA_TEXORG4, tex->texorg4, MGA_TEXWIDTH, tex->texwidth, MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000); DMA_BLOCK(MGA_WR57, 0x00000000, MGA_WR53, 0x00000000, MGA_WR61, 0x00000000, MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC); DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC, MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff, MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC); ADVANCE_DMA(); } static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int pipe = sarea_priv->warp_pipe; DMA_LOCALS; BEGIN_DMA(3); DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND, MGA_WVRTXSZ, 0x00000007, MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000); DMA_BLOCK(MGA_WR25, 0x00000100, MGA_WR34, 0x00000000, MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff); /* Padding required due to hardware bug. 
*/ DMA_BLOCK(MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] | MGA_WMODE_START | dev_priv->wagp_enable)); ADVANCE_DMA(); } static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int pipe = sarea_priv->warp_pipe; DMA_LOCALS; /* printk("mga_g400_emit_pipe %x\n", pipe); */ BEGIN_DMA(10); DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); if (pipe & MGA_T2) { DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x1e000000); } else { if (dev_priv->warp_pipe & MGA_T2) { /* Flush the WARP pipe */ DMA_BLOCK(MGA_YDST, 0x00000000, MGA_FXLEFT, 0x00000000, MGA_FXRIGHT, 0x00000001, MGA_DWGCTL, MGA_DWGCTL_FLUSH); DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001, MGA_DWGSYNC, 0x00007000, MGA_TEXCTL2, MGA_G400_TC2_MAGIC, MGA_LEN + MGA_EXEC, 0x00000000); DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX | MGA_G400_TC2_MAGIC), MGA_LEN + MGA_EXEC, 0x00000000, MGA_TEXCTL2, MGA_G400_TC2_MAGIC, MGA_DMAPAD, 0x00000000); } DMA_BLOCK(MGA_WVRTXSZ, 0x00001807, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x00000000, MGA_WACCEPTSEQ, 0x18000000); } DMA_BLOCK(MGA_WFLAG, 0x00000000, MGA_WFLAG1, 0x00000000, MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000); DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0 */ MGA_WR57, 0x00000000, /* tex0 */ MGA_WR53, 0x00000000, /* tex1 */ MGA_WR61, 0x00000000); /* tex1 */ DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC, /* tex0 width */ MGA_WR62, MGA_G400_WR_MAGIC, /* tex0 height */ MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */ MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */ /* Padding required due to hardware bug */ 
DMA_BLOCK(MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] | MGA_WMODE_START | dev_priv->wagp_enable)); ADVANCE_DMA(); } static void mga_g200_emit_state(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; if (sarea_priv->warp_pipe != dev_priv->warp_pipe) { mga_g200_emit_pipe(dev_priv); dev_priv->warp_pipe = sarea_priv->warp_pipe; } if (dirty & MGA_UPLOAD_CONTEXT) { mga_g200_emit_context(dev_priv); sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT; } if (dirty & MGA_UPLOAD_TEX0) { mga_g200_emit_tex0(dev_priv); sarea_priv->dirty &= ~MGA_UPLOAD_TEX0; } } static void mga_g400_emit_state(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; int multitex = sarea_priv->warp_pipe & MGA_T2; if (sarea_priv->warp_pipe != dev_priv->warp_pipe) { mga_g400_emit_pipe(dev_priv); dev_priv->warp_pipe = sarea_priv->warp_pipe; } if (dirty & MGA_UPLOAD_CONTEXT) { mga_g400_emit_context(dev_priv); sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT; } if (dirty & MGA_UPLOAD_TEX0) { mga_g400_emit_tex0(dev_priv); sarea_priv->dirty &= ~MGA_UPLOAD_TEX0; } if ((dirty & MGA_UPLOAD_TEX1) && multitex) { mga_g400_emit_tex1(dev_priv); sarea_priv->dirty &= ~MGA_UPLOAD_TEX1; } } /* ================================================================ * SAREA state verification */ /* Disallow all write destinations except the front and backbuffer. */ static int mga_verify_context(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; if (ctx->dstorg != dev_priv->front_offset && ctx->dstorg != dev_priv->back_offset) { DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n", ctx->dstorg, dev_priv->front_offset, dev_priv->back_offset); ctx->dstorg = 0; return -EINVAL; } return 0; } /* Disallow texture reads from PCI space. 
*/ static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit]; unsigned int org; org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK); if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) { DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit); tex->texorg = 0; return -EINVAL; } return 0; } static int mga_verify_state(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; int ret = 0; if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; if (dirty & MGA_UPLOAD_CONTEXT) ret |= mga_verify_context(dev_priv); if (dirty & MGA_UPLOAD_TEX0) ret |= mga_verify_tex(dev_priv, 0); if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { if (dirty & MGA_UPLOAD_TEX1) ret |= mga_verify_tex(dev_priv, 1); if (dirty & MGA_UPLOAD_PIPE) ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES); } else { if (dirty & MGA_UPLOAD_PIPE) ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES); } return (ret == 0); } static int mga_verify_iload(drm_mga_private_t *dev_priv, unsigned int dstorg, unsigned int length) { if (dstorg < dev_priv->texture_offset || dstorg + length > (dev_priv->texture_offset + dev_priv->texture_size)) { DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg); return -EINVAL; } if (length & MGA_ILOAD_MASK) { DRM_ERROR("*** bad iload length: 0x%x\n", length & MGA_ILOAD_MASK); return -EINVAL; } return 0; } static int mga_verify_blit(drm_mga_private_t *dev_priv, unsigned int srcorg, unsigned int dstorg) { if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) { DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg); return -EINVAL; } return 0; } /* ================================================================ * */ static void mga_dma_dispatch_clear(struct drm_device *dev, 
drm_mga_clear_t *clear) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; DRM_DEBUG("\n"); BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); ADVANCE_DMA(); for (i = 0; i < nbox; i++) { struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; DRM_DEBUG(" from=%d,%d to=%d,%d\n", box->x1, box->y1, box->x2, box->y2); if (clear->flags & MGA_FRONT) { BEGIN_DMA(2); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, clear->color_mask, MGA_YDSTLEN, (box->y1 << 16) | height, MGA_FXBNDRY, (box->x2 << 16) | box->x1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_FCOL, clear->clear_color, MGA_DSTORG, dev_priv->front_offset, MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ADVANCE_DMA(); } if (clear->flags & MGA_BACK) { BEGIN_DMA(2); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, clear->color_mask, MGA_YDSTLEN, (box->y1 << 16) | height, MGA_FXBNDRY, (box->x2 << 16) | box->x1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_FCOL, clear->clear_color, MGA_DSTORG, dev_priv->back_offset, MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ADVANCE_DMA(); } if (clear->flags & MGA_DEPTH) { BEGIN_DMA(2); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, clear->depth_mask, MGA_YDSTLEN, (box->y1 << 16) | height, MGA_FXBNDRY, (box->x2 << 16) | box->x1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_FCOL, clear->clear_depth, MGA_DSTORG, dev_priv->depth_offset, MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd); ADVANCE_DMA(); } } BEGIN_DMA(1); /* Force reset of DWGCTL */ DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); FLUSH_DMA(); } static void mga_dma_dispatch_swap(struct drm_device *dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = 
dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; int i; DMA_LOCALS; DRM_DEBUG("\n"); sarea_priv->last_frame.head = dev_priv->prim.tail; sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap; BEGIN_DMA(4 + nbox); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset, MGA_MACCESS, dev_priv->maccess, MGA_SRCORG, dev_priv->back_offset, MGA_AR5, dev_priv->front_pitch); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY); for (i = 0; i < nbox; i++) { struct drm_clip_rect *box = &pbox[i]; u32 height = box->y2 - box->y1; u32 start = box->y1 * dev_priv->front_pitch; DRM_DEBUG(" from=%d,%d to=%d,%d\n", box->x1, box->y1, box->x2, box->y2); DMA_BLOCK(MGA_AR0, start + box->x2 - 1, MGA_AR3, start + box->x1, MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1, MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height); } DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); FLUSH_DMA(); DRM_DEBUG("... 
done.\n"); } static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 address = (u32) buf->bus_address; u32 length = (u32) buf->used; int i = 0; DMA_LOCALS; DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); if (buf->used) { buf_priv->dispatched = 1; MGA_EMIT_STATE(dev_priv, sarea_priv->dirty); do { if (i < sarea_priv->nbox) { mga_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); } BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_SECADDRESS, (address | MGA_DMA_VERTEX), MGA_SECEND, ((address + length) | dev_priv->dma_access)); ADVANCE_DMA(); } while (++i < sarea_priv->nbox); } if (buf_priv->discard) { AGE_BUFFER(buf_priv); buf->pending = 0; buf->used = 0; buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } FLUSH_DMA(); } static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf, unsigned int start, unsigned int end) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; u32 address = (u32) buf->bus_address; int i = 0; DMA_LOCALS; DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end); if (start != end) { buf_priv->dispatched = 1; MGA_EMIT_STATE(dev_priv, sarea_priv->dirty); do { if (i < sarea_priv->nbox) { mga_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]); } BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_SETUPADDRESS, address + start, MGA_SETUPEND, ((address + end) | dev_priv->dma_access)); ADVANCE_DMA(); } while (++i < sarea_priv->nbox); } if (buf_priv->discard) { AGE_BUFFER(buf_priv); buf->pending = 0; buf->used = 0; buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } FLUSH_DMA(); } /* This copies a 64 byte aligned agp region to the frambuffer with a * standard blit, the ioctl needs to do checking. 
*/ static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf, unsigned int dstorg, unsigned int length) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state; u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM; u32 y2; DMA_LOCALS; DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used); y2 = length / 64; BEGIN_DMA(5); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DSTORG, dstorg, MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64); DMA_BLOCK(MGA_PITCH, 64, MGA_PLNWT, 0xffffffff, MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY); DMA_BLOCK(MGA_AR0, 63, MGA_AR3, 0, MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2); DMA_BLOCK(MGA_PLNWT, ctx->plnwt, MGA_SRCORG, dev_priv->front_offset, MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000); ADVANCE_DMA(); AGE_BUFFER(buf_priv); buf->pending = 0; buf->used = 0; buf_priv->dispatched = 0; mga_freelist_put(dev, buf); FLUSH_DMA(); } static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; struct drm_clip_rect *pbox = sarea_priv->boxes; int nbox = sarea_priv->nbox; u32 scandir = 0, i; DMA_LOCALS; DRM_DEBUG("\n"); BEGIN_DMA(4 + nbox); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000); DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY, MGA_PLNWT, blit->planemask, MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg); DMA_BLOCK(MGA_SGN, scandir, MGA_MACCESS, dev_priv->maccess, MGA_AR5, blit->ydir * blit->src_pitch, MGA_PITCH, blit->dst_pitch); for (i = 0; i < nbox; i++) { int srcx = pbox[i].x1 + blit->delta_sx; int srcy = pbox[i].y1 + blit->delta_sy; int dstx = 
pbox[i].x1 + blit->delta_dx; int dsty = pbox[i].y1 + blit->delta_dy; int h = pbox[i].y2 - pbox[i].y1; int w = pbox[i].x2 - pbox[i].x1 - 1; int start; if (blit->ydir == -1) srcy = blit->height - srcy - 1; start = srcy * blit->src_pitch + srcx; DMA_BLOCK(MGA_AR0, start + w, MGA_AR3, start, MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff), MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h); } /* Do something to flush AGP? */ /* Force reset of DWGCTL */ DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_PLNWT, ctx->plnwt, MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl); ADVANCE_DMA(); } /* ================================================================ * */ static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_clear_t *clear = data; LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_clear(dev, clear); /* Make sure we restore the 3D state next time. */ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_swap(dev); /* Make sure we restore the 3D state next time. 
*/ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); if (vertex->idx < 0 || vertex->idx > dma->buf_count) return -EINVAL; buf = dma->buflist[vertex->idx]; buf_priv = buf->dev_private; buf->used = vertex->used; buf_priv->discard = vertex->discard; if (!mga_verify_state(dev_priv)) { if (vertex->discard) { if (buf_priv->dispatched == 1) AGE_BUFFER(buf_priv); buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_vertex(dev, buf); return 0; } static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_indices_t *indices = data; LOCK_TEST_WITH_RETURN(dev, file_priv); if (indices->idx < 0 || indices->idx > dma->buf_count) return -EINVAL; buf = dma->buflist[indices->idx]; buf_priv = buf->dev_private; buf_priv->discard = indices->discard; if (!mga_verify_state(dev_priv)) { if (indices->discard) { if (buf_priv->dispatched == 1) AGE_BUFFER(buf_priv); buf_priv->dispatched = 0; mga_freelist_put(dev, buf); } return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_indices(dev, buf, indices->start, indices->end); return 0; } static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_iload_t *iload = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); #if 0 if (mga_do_wait_for_idle(dev_priv) < 0) { if (MGA_DMA_DEBUG) 
DRM_INFO("-EBUSY\n"); return -EBUSY; } #endif if (iload->idx < 0 || iload->idx > dma->buf_count) return -EINVAL; buf = dma->buflist[iload->idx]; buf_priv = buf->dev_private; if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) { mga_freelist_put(dev, buf); return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length); /* Make sure we restore the 3D state next time. */ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_blit_t *blit = data; DRM_DEBUG("\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS; if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg)) return -EINVAL; WRAP_TEST_WITH_RETURN(dev_priv); mga_dma_dispatch_blit(dev, blit); /* Make sure we restore the 3D state next time. 
*/ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT; return 0; } static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_getparam_t *param = data; int value; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); switch (param->param) { case MGA_PARAM_IRQ_NR: value = drm_dev_to_irq(dev); break; case MGA_PARAM_CARD_TYPE: value = dev_priv->chipset; break; default: return -EINVAL; } if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } return 0; } static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; u32 *fence = data; DMA_LOCALS; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); /* I would normal do this assignment in the declaration of fence, * but dev_priv may be NULL. 
*/ *fence = dev_priv->next_fence_to_post; dev_priv->next_fence_to_post++; BEGIN_DMA(1); DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000); ADVANCE_DMA(); return 0; } static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file * file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; u32 *fence = data; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); mga_driver_fence_wait(dev, fence); return 0; } struct drm_ioctl_desc mga_ioctls[] = { DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH), DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), }; int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
gpl-2.0
thekraven/kernel_samsung_lt02ltespr
arch/mips/wrppmc/pci.c
9439
1335
/*
 * pci.c: GT64120 PCI support.
 *
 * Copyright (C) 2006, Wind River System Inc. Rongkai.Zhan <rongkai.zhan@windriver.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/pci.h>

#include <asm/gt64120.h>

/* Generic GT64xxx PCI config-space accessors, shared with other boards. */
extern struct pci_ops gt64xxx_pci0_ops;

/* I/O window decoded by PCI bus 0 of the GT64120. */
static struct resource pci0_io_resource = {
	.name	= "pci_0 io",
	.start	= GT_PCI_IO_BASE,
	.end	= GT_PCI_IO_BASE + GT_PCI_IO_SIZE - 1,
	.flags	= IORESOURCE_IO,
};

/* Memory window decoded by PCI bus 0 of the GT64120. */
static struct resource pci0_mem_resource = {
	.name	= "pci_0 memory",
	.start	= GT_PCI_MEM_BASE,
	.end	= GT_PCI_MEM_BASE + GT_PCI_MEM_SIZE - 1,
	.flags	= IORESOURCE_MEM,
};

/* Host controller for PCI bus 0. */
static struct pci_controller hose_0 = {
	.pci_ops	= &gt64xxx_pci0_ops,
	.io_resource	= &pci0_io_resource,
	.mem_resource	= &pci0_mem_resource,
};

/*
 * Register the GT64120 PCI controller and claim its I/O port range.
 * Returns 0; runs at arch_initcall time.
 */
static int __init gt64120_pci_init(void)
{
	u32 tmp;

	/* Reads are performed for their hardware side effect; the values
	 * are deliberately discarded (see the original maintainer note).
	 */
	tmp = GT_READ(GT_PCI0_CMD_OFS);		/* Huh??? -- Ralf */
	tmp = GT_READ(GT_PCI0_BARE_OFS);

	/* reset the whole PCI I/O space range */
	ioport_resource.start = GT_PCI_IO_BASE;
	ioport_resource.end = GT_PCI_IO_BASE + GT_PCI_IO_SIZE - 1;

	register_pci_controller(&hose_0);
	return 0;
}

arch_initcall(gt64120_pci_init);
gpl-2.0
RaymanFX/kernel_samsung_lt03wifi
drivers/mca/mca-driver.c
9951
1788
/* -*- mode: c; c-basic-offset: 8 -*- */

/*
 * MCA driver support functions for sysfs.
 *
 * (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
 *
 **-----------------------------------------------------------------------------
 **
 **  This program is free software; you can redistribute it and/or modify
 **  it under the terms of the GNU General Public License as published by
 **  the Free Software Foundation; either version 2 of the License, or
 **  (at your option) any later version.
 **
 **  This program is distributed in the hope that it will be useful,
 **  but WITHOUT ANY WARRANTY; without even the implied warranty of
 **  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 **  GNU General Public License for more details.
 **
 **  You should have received a copy of the GNU General Public License
 **  along with this program; if not, write to the Free Software
 **  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 **
 **-----------------------------------------------------------------------------
 */

#include <linux/device.h>
#include <linux/mca.h>
#include <linux/module.h>

/*
 * Register an MCA driver with the driver core.
 *
 * On machines without an MCA bus this is a successful no-op, so drivers
 * can call it unconditionally.  Returns 0 on success or the negative
 * error from driver_register().
 */
int mca_register_driver(struct mca_driver *mca_drv)
{
	int ret;

	if (!MCA_bus)
		return 0;

	mca_drv->driver.bus = &mca_bus_type;
	ret = driver_register(&mca_drv->driver);
	if (ret < 0)
		return ret;

	/* No integrated device by default; see the _integrated variant. */
	mca_drv->integrated_id = 0;
	return 0;
}
EXPORT_SYMBOL(mca_register_driver);

/*
 * Register an MCA driver that also drives a motherboard-integrated
 * device identified by @integrated_id.
 */
int mca_register_driver_integrated(struct mca_driver *mca_driver,
				   int integrated_id)
{
	int ret = mca_register_driver(mca_driver);

	if (ret == 0)
		mca_driver->integrated_id = integrated_id;

	return ret;
}
EXPORT_SYMBOL(mca_register_driver_integrated);

/* Undo mca_register_driver(); a no-op on machines without an MCA bus. */
void mca_unregister_driver(struct mca_driver *mca_drv)
{
	if (!MCA_bus)
		return;

	driver_unregister(&mca_drv->driver);
}
EXPORT_SYMBOL(mca_unregister_driver);
gpl-2.0
mythos234/SimplKernel-LL-BOFJ
sound/i2c/tea6330t.c
9951
11581
/* * Routines for control of the TEA6330T circuit via i2c bus * Sound fader control circuit for car radios by Philips Semiconductors * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tea6330t.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Routines for control of the TEA6330T circuit via i2c bus"); MODULE_LICENSE("GPL"); #define TEA6330T_ADDR (0x80>>1) /* fixed address */ #define TEA6330T_SADDR_VOLUME_LEFT 0x00 /* volume left */ #define TEA6330T_SADDR_VOLUME_RIGHT 0x01 /* volume right */ #define TEA6330T_SADDR_BASS 0x02 /* bass control */ #define TEA6330T_SADDR_TREBLE 0x03 /* treble control */ #define TEA6330T_SADDR_FADER 0x04 /* fader control */ #define TEA6330T_MFN 0x20 /* mute control for selected channels */ #define TEA6330T_FCH 0x10 /* select fader channels - front or rear */ #define TEA6330T_SADDR_AUDIO_SWITCH 0x05 /* audio switch */ #define TEA6330T_GMU 0x80 /* mute control, general mute */ #define TEA6330T_EQN 0x40 /* equalizer switchover (0=equalizer-on) */ struct tea6330t { struct snd_i2c_device *device; struct snd_i2c_bus *bus; int equalizer; int fader; unsigned char regs[8]; unsigned char mleft, 
mright;				/* cached master volume, raw register values */
	/* NOTE(review): this is the tail of struct tea6330t — the opening of
	 * the struct (and the mleft member paired with mright) lies above
	 * this chunk. */
	unsigned char bass, treble;		/* cached tone-control positions (0-based) */
	unsigned char max_bass, max_treble;	/* control range limits; depend on equalizer mode */
};

/*
 * Probe the i2c bus for a TEA6330T at its fixed address.
 * Returns the result of snd_i2c_probeaddr() (presumably nonzero when a
 * device answers at TEA6330T_ADDR — confirm against the ALSA i2c layer).
 * NOTE(review): the @equalizer argument is unused here.
 */
int snd_tea6330t_detect(struct snd_i2c_bus *bus, int equalizer)
{
	int res;

	snd_i2c_lock(bus);
	res = snd_i2c_probeaddr(bus, TEA6330T_ADDR);
	snd_i2c_unlock(bus);
	return res;
}

#if 0
/* Dead code: single-register write helper, compiled out. */
static void snd_tea6330t_set(struct tea6330t *tea,
			     unsigned char addr, unsigned char value)
{
#if 0
	printk(KERN_DEBUG "set - 0x%x/0x%x\n", addr, value);
#endif
	snd_i2c_write(tea->bus, TEA6330T_ADDR, addr, value, 1);
}
#endif

/* Control template: stereo master volume. */
#define TEA6330T_MASTER_VOLUME(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_master_volume, \
  .get = snd_tea6330t_get_master_volume, .put = snd_tea6330t_put_master_volume }

/* Master volume is a 2-channel integer control, range 0..43. */
static int snd_tea6330t_info_master_volume(struct snd_kcontrol *kcontrol,
					   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 43;
	return 0;
}

/* Report the cached volume; 0x14 is the register offset of level 0. */
static int snd_tea6330t_get_master_volume(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	snd_i2c_lock(tea->bus);
	ucontrol->value.integer.value[0] = tea->mleft - 0x14;
	ucontrol->value.integer.value[1] = tea->mright - 0x14;
	snd_i2c_unlock(tea->bus);
	return 0;
}

/*
 * Update the cached master volume and, for channels that are not muted
 * (shadow register != 0), push the new values to the chip in a single
 * i2c message.  Returns 1 if anything changed, 0 if not, or a negative
 * error from the i2c transfer.
 */
static int snd_tea6330t_put_master_volume(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, count, err;
	unsigned char bytes[3];
	unsigned char val1, val2;

	/* wrap user value into 0..43, then bias by the 0x14 register offset */
	val1 = (ucontrol->value.integer.value[0] % 44) + 0x14;
	val2 = (ucontrol->value.integer.value[1] % 44) + 0x14;
	snd_i2c_lock(tea->bus);
	change = val1 != tea->mleft || val2 != tea->mright;
	tea->mleft = val1;
	tea->mright = val2;
	count = 0;
	if (tea->regs[TEA6330T_SADDR_VOLUME_LEFT] != 0) {
		bytes[count++] = TEA6330T_SADDR_VOLUME_LEFT;
		bytes[count++] = tea->regs[TEA6330T_SADDR_VOLUME_LEFT] = tea->mleft;
	}
	if (tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] != 0) {
		/* start the message at the right-channel subaddress if the
		 * left channel was skipped (auto-increment covers both
		 * registers otherwise) */
		if (count == 0)
			bytes[count++] = TEA6330T_SADDR_VOLUME_RIGHT;
		bytes[count++] = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] = tea->mright;
	}
	if (count > 0) {
		if ((err = snd_i2c_sendbytes(tea->device, bytes, count)) < 0)
			change = err;
	}
	snd_i2c_unlock(tea->bus);
	return change;
}

/* Control template: stereo master mute switch. */
#define TEA6330T_MASTER_SWITCH(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_master_switch, \
  .get = snd_tea6330t_get_master_switch, .put = snd_tea6330t_put_master_switch }

#define snd_tea6330t_info_master_switch snd_ctl_boolean_stereo_info

/* Channel is "on" iff its volume shadow register is nonzero. */
static int snd_tea6330t_get_master_switch(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	snd_i2c_lock(tea->bus);
	ucontrol->value.integer.value[0] = tea->regs[TEA6330T_SADDR_VOLUME_LEFT] == 0 ? 0 : 1;
	ucontrol->value.integer.value[1] = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] == 0 ? 0 : 1;
	snd_i2c_unlock(tea->bus);
	return 0;
}

/*
 * Mute/unmute by writing 0 (mute) or the cached volume to both channel
 * registers in one message.  Returns 1/0 for change/no-change, or a
 * negative i2c error.
 */
static int snd_tea6330t_put_master_switch(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, err;
	unsigned char bytes[3];
	unsigned char oval1, oval2, val1, val2;

	val1 = ucontrol->value.integer.value[0] & 1;
	val2 = ucontrol->value.integer.value[1] & 1;
	snd_i2c_lock(tea->bus);
	oval1 = tea->regs[TEA6330T_SADDR_VOLUME_LEFT] == 0 ? 0 : 1;
	oval2 = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] == 0 ? 0 : 1;
	change = val1 != oval1 || val2 != oval2;
	tea->regs[TEA6330T_SADDR_VOLUME_LEFT] = val1 ? tea->mleft : 0;
	tea->regs[TEA6330T_SADDR_VOLUME_RIGHT] = val2 ? tea->mright : 0;
	bytes[0] = TEA6330T_SADDR_VOLUME_LEFT;
	bytes[1] = tea->regs[TEA6330T_SADDR_VOLUME_LEFT];
	bytes[2] = tea->regs[TEA6330T_SADDR_VOLUME_RIGHT];
	if ((err = snd_i2c_sendbytes(tea->device, bytes, 3)) < 0)
		change = err;
	snd_i2c_unlock(tea->bus);
	return change;
}

/* Control template: mono bass tone control. */
#define TEA6330T_BASS(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_bass, \
  .get = snd_tea6330t_get_bass, .put = snd_tea6330t_put_bass }

/* Bass range depends on equalizer mode (see max_bass setup below). */
static int snd_tea6330t_info_bass(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = tea->max_bass;
	return 0;
}

static int snd_tea6330t_get_bass(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = tea->bass;
	return 0;
}

/*
 * Write the bass register.  The raw register value is the user value
 * plus a mode-dependent bias (7 with equalizer, 3 without).
 */
static int snd_tea6330t_put_bass(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, err;
	unsigned char bytes[2];
	unsigned char val1;

	val1 = ucontrol->value.integer.value[0] % (tea->max_bass + 1);
	snd_i2c_lock(tea->bus);
	tea->bass = val1;
	val1 += tea->equalizer ? 7 : 3;
	change = tea->regs[TEA6330T_SADDR_BASS] != val1;
	bytes[0] = TEA6330T_SADDR_BASS;
	bytes[1] = tea->regs[TEA6330T_SADDR_BASS] = val1;
	if ((err = snd_i2c_sendbytes(tea->device, bytes, 2)) < 0)
		change = err;
	snd_i2c_unlock(tea->bus);
	return change;
}

/* Control template: mono treble tone control. */
#define TEA6330T_TREBLE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
  .info = snd_tea6330t_info_treble, \
  .get = snd_tea6330t_get_treble, .put = snd_tea6330t_put_treble }

static int snd_tea6330t_info_treble(struct snd_kcontrol *kcontrol,
				    struct snd_ctl_elem_info *uinfo)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = tea->max_treble;
	return 0;
}

static int snd_tea6330t_get_treble(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] = tea->treble;
	return 0;
}

/* Like put_bass, but the treble register bias is a constant 3. */
static int snd_tea6330t_put_treble(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct tea6330t *tea = snd_kcontrol_chip(kcontrol);
	int change, err;
	unsigned char bytes[2];
	unsigned char val1;

	val1 = ucontrol->value.integer.value[0] % (tea->max_treble + 1);
	snd_i2c_lock(tea->bus);
	tea->treble = val1;
	val1 += 3;
	change = tea->regs[TEA6330T_SADDR_TREBLE] != val1;
	bytes[0] = TEA6330T_SADDR_TREBLE;
	bytes[1] = tea->regs[TEA6330T_SADDR_TREBLE] = val1;
	if ((err = snd_i2c_sendbytes(tea->device, bytes, 2)) < 0)
		change = err;
	snd_i2c_unlock(tea->bus);
	return change;
}

/* Mixer controls exported by this chip. */
static struct snd_kcontrol_new snd_tea6330t_controls[] = {
TEA6330T_MASTER_SWITCH("Master Playback Switch", 0),
TEA6330T_MASTER_VOLUME("Master Playback Volume", 0),
TEA6330T_BASS("Tone Control - Bass", 0),
TEA6330T_TREBLE("Tone Control - Treble", 0)
};

/* i2c device private_free callback: release the tea6330t state. */
static void snd_tea6330_free(struct snd_i2c_device *device)
{
	kfree(device->private_data);
}

/*
 * Create the TEA6330T i2c device, program it to a sane initial state
 * (fader off, default volume/tone) and register the mixer controls on
 * @card.  The "Tone Control - Treble" control is skipped when the
 * equalizer mode leaves no treble range.
 * Returns 0 on success or a negative error; on failure after device
 * creation the i2c device (and the state via private_free) is released.
 */
int snd_tea6330t_update_mixer(struct snd_card *card,
			      struct snd_i2c_bus *bus,
			      int equalizer, int fader)
{
	struct snd_i2c_device *device;
	struct tea6330t *tea;
	struct snd_kcontrol_new *knew;
	unsigned int idx;
	int err = -ENOMEM;
	u8 default_treble, default_bass;
	unsigned char bytes[7];

	tea = kzalloc(sizeof(*tea), GFP_KERNEL);
	if (tea == NULL)
		return -ENOMEM;
	if ((err = snd_i2c_device_create(bus, "TEA6330T",
					 TEA6330T_ADDR, &device)) < 0) {
		kfree(tea);
		return err;
	}
	tea->device = device;
	tea->bus = bus;
	tea->equalizer = equalizer;
	tea->fader = fader;
	device->private_data = tea;
	device->private_free = snd_tea6330_free;

	snd_i2c_lock(bus);

	/* turn fader off and handle equalizer */
	tea->regs[TEA6330T_SADDR_FADER] = 0x3f;
	tea->regs[TEA6330T_SADDR_AUDIO_SWITCH] = equalizer ? 0 : TEA6330T_EQN;
	/* initialize mixer */
	if (!tea->equalizer) {
		tea->max_bass = 9;
		tea->max_treble = 8;
		default_bass = 3 + 4;
		tea->bass = 4;
		default_treble = 3 + 4;
		tea->treble = 4;
	} else {
		tea->max_bass = 5;
		tea->max_treble = 0;
		default_bass = 7 + 4;
		tea->bass = 4;
		default_treble = 3;
		tea->treble = 0;
	}
	tea->mleft = tea->mright = 0x14;	/* volume level 0 */
	tea->regs[TEA6330T_SADDR_BASS] = default_bass;
	tea->regs[TEA6330T_SADDR_TREBLE] = default_treble;

	/* compose I2C message and put the hardware to initial state */
	bytes[0] = TEA6330T_SADDR_VOLUME_LEFT;
	for (idx = 0; idx < 6; idx++)
		bytes[idx+1] = tea->regs[idx];
	if ((err = snd_i2c_sendbytes(device, bytes, 7)) < 0)
		goto __error;

	strcat(card->mixername, ",TEA6330T");
	if ((err = snd_component_add(card, "TEA6330T")) < 0)
		goto __error;

	for (idx = 0; idx < ARRAY_SIZE(snd_tea6330t_controls); idx++) {
		knew = &snd_tea6330t_controls[idx];
		/* no treble range in this mode -> don't expose the control */
		if (tea->treble == 0 && !strcmp(knew->name, "Tone Control - Treble"))
			continue;
		if ((err = snd_ctl_add(card, snd_ctl_new1(knew, tea))) < 0)
			goto __error;
	}

	snd_i2c_unlock(bus);
	return 0;

      __error:
	snd_i2c_unlock(bus);
	snd_i2c_device_free(device);
	return err;
}

EXPORT_SYMBOL(snd_tea6330t_detect);
EXPORT_SYMBOL(snd_tea6330t_update_mixer);

/*
 *  INIT part
 */

static int __init alsa_tea6330t_init(void)
{
	return 0;
}

static void __exit alsa_tea6330t_exit(void)
{
}

module_init(alsa_tea6330t_init)
module_exit(alsa_tea6330t_exit)
gpl-2.0
linux-wpan/linux-wpan
kernel/user.c
224
5420
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	/* identity uid/gid/projid mappings covering the whole 32-bit range */
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.proc_inum = PROC_USER_INIT_INO,
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;	/* slab cache for struct user_struct */
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm     = 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

/* Look up @uid in one hash bucket; takes a reference on a hit. */
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

/*
 * Drop a reference; when it hits zero, unhash and free the entry.
 * atomic_dec_and_lock() only takes uidhash_lock on the final ref, and
 * free_user() then releases it (see the lock-ordering comment above for
 * why IRQs are disabled manually here).
 */
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

/*
 * Find-or-create the user_struct for @uid, returning it with a reference
 * held, or NULL on allocation failure.  Allocation happens outside the
 * lock; a racing inserter is detected by re-checking the bucket before
 * insert, in which case the fresh allocation is discarded.
 */
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}

/* Set up the slab cache and hash table, then hash the static root user. */
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);
gpl-2.0
gototem/kernel
drivers/gpu/drm/drm_edid.c
224
90489
/*
 * Copyright (c) 2006 Luc Verhaegen (quirks list)
 * Copyright (c) 2007-2008 Intel Corporation
 *   Jesse Barnes <jesse.barnes@intel.com>
 * Copyright 2010 Red Hat, Inc.
 *
 * DDC probing routines (drm_ddc_read & drm_do_probe_ddc_edid) originally from
 * FB layer.
 *   Copyright (C) 2006 Dennis Munsie <dmunsie@cecropia.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>

/* True when the EDID's (version, revision) is strictly newer than (maj, min).
 * Note: revision is only compared when the major versions are equal. */
#define version_greater(edid, maj, min) \
	(((edid)->version > (maj)) || \
	 ((edid)->version == (maj) && (edid)->revision > (min)))

/* Counts of the fixed timing slots in an EDID base block. */
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4

/*
 * EDID blocks out in the wild have a variety of bugs, try to collect
 * them here (note that userspace may work around broken monitors first,
 * but fixes should make their way here so that the kernel "just works"
 * on as many displays as possible).
 */

/* First detailed mode wrong, use largest 60Hz mode */
#define EDID_QUIRK_PREFER_LARGE_60		(1 << 0)
/* Reported 135MHz pixel clock is too high, needs adjustment */
#define EDID_QUIRK_135_CLOCK_TOO_HIGH		(1 << 1)
/* Prefer the largest mode at 75 Hz */
#define EDID_QUIRK_PREFER_LARGE_75		(1 << 2)
/* Detail timing is in cm not mm */
#define EDID_QUIRK_DETAILED_IN_CM		(1 << 3)
/* Detailed timing descriptors have bogus size values, so just take the
 * maximum size and use that.
 */
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE	(1 << 4)
/* Monitor forgot to set the first detailed is preferred bit. */
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED	(1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP		(1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING	(1 << 7)

/* Per-connector state threaded through the detailed-mode parsing callbacks. */
struct detailed_mode_closure {
	struct drm_connector *connector;
	struct edid *edid;
	bool preferred;		/* first detailed block is the preferred mode */
	u32 quirks;		/* EDID_QUIRK_* bits for this monitor */
	int modes;		/* number of modes added so far */
};

/* Timing-standard levels used when expanding standard timings. */
#define LEVEL_DMT	0
#define LEVEL_GTF	1
#define LEVEL_GTF2	2
#define LEVEL_CVT	3

/* Known-broken monitors, keyed by 3-letter PNP vendor id and product id
 * (some product ids appear in both decimal and hex byte orders). */
static struct edid_quirk {
	char vendor[4];
	int product_id;
	u32 quirks;
} edid_quirk_list[] = {
	/* Acer AL1706 */
	{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
	/* Acer F51 */
	{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
	/* Unknown Acer */
	{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },

	/* Belinea 10 15 55 */
	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },

	/* Envision Peripherals, Inc. EN-7100e */
	{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
	/* Envision EN2028 */
	{ "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },

	/* Funai Electronics PM36B */
	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
	  EDID_QUIRK_DETAILED_IN_CM },

	/* LG Philips LCD LP154W01-A5 */
	{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
	{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },

	/* Philips 107p5 CRT */
	{ "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },

	/* Proview AY765C */
	{ "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },

	/* Samsung SyncMaster 205BW.  Note: irony */
	{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
	/* Samsung SyncMaster 22[5-6]BW */
	{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
	{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },

	/* ViewSonic VA2026w */
	{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};

/*
 * Autogenerated from the DMT spec.
 * This table is copied from xfree86/modes/xf86EdidModes.c.
*/ static const struct drm_display_mode drm_dmt_modes[] = { /* 640x350@85Hz */ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, 736, 832, 0, 350, 382, 385, 445, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x400@85Hz */ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, 736, 832, 0, 400, 401, 404, 445, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@85Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756, 828, 936, 0, 400, 401, 404, 446, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 640x480@60Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 489, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 492, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@85Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696, 752, 832, 0, 480, 481, 484, 509, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 800x600@56Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, 896, 1024, 0, 600, 601, 603, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, 976, 1040, 0, 600, 637, 643, 666, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, 896, 1056, 0, 600, 601, 604, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@85Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832, 896, 1048, 0, 600, 601, 604, 631, 0, 
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@120Hz RB */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848, 880, 960, 0, 600, 603, 607, 636, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 848x480@60Hz */ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, 976, 1088, 0, 480, 486, 494, 517, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@43Hz, interlace */ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, 1208, 1264, 0, 768, 768, 772, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, 1184, 1328, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, 1136, 1312, 0, 768, 769, 772, 800, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@85Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, 1168, 1376, 0, 768, 769, 772, 808, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@120Hz RB */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072, 1104, 1184, 0, 768, 771, 775, 813, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1152x864@75Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@60Hz RB */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328, 1360, 1440, 0, 768, 771, 778, 790, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x768@60Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 1472, 1664, 0, 768, 771, 778, 798, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@75Hz */ { 
DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360, 1488, 1696, 0, 768, 771, 778, 805, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x768@85Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, 1496, 1712, 0, 768, 771, 778, 809, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@120Hz RB */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328, 1360, 1440, 0, 768, 771, 778, 813, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@60Hz RB */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328, 1360, 1440, 0, 800, 803, 809, 823, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@60Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, 1480, 1680, 0, 800, 803, 809, 831, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x800@75Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360, 1488, 1696, 0, 800, 803, 809, 838, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x800@85Hz */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, 1496, 1712, 0, 800, 803, 809, 843, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x800@120Hz RB */ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328, 1360, 1440, 0, 800, 803, 809, 847, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x960@60Hz */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, 1488, 1800, 0, 960, 961, 964, 1000, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x960@85Hz */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, 1504, 1728, 0, 960, 961, 964, 1011, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x960@120Hz RB */ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328, 1360, 1440, 0, 960, 963, 967, 1017, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1280x1024@60Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, 1440, 1688, 0, 1024, 1025, 1028, 
1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@85Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, 1504, 1728, 0, 1024, 1025, 1028, 1072, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@120Hz RB */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328, 1360, 1440, 0, 1024, 1027, 1034, 1084, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1360x768@60Hz */ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, 1536, 1792, 0, 768, 771, 777, 795, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1360x768@120Hz RB */ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408, 1440, 1520, 0, 768, 771, 776, 813, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1400x1050@60Hz RB */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448, 1480, 1560, 0, 1050, 1053, 1057, 1080, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1400x1050@60Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1400x1050@75Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, 1648, 1896, 0, 1050, 1053, 1057, 1099, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1400x1050@85Hz */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, 1656, 1912, 0, 1050, 1053, 1057, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1400x1050@120Hz RB */ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448, 1480, 1560, 0, 1050, 1053, 1057, 1112, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1440x900@60Hz RB */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488, 1520, 1600, 0, 900, 903, 909, 926, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) 
}, /* 1440x900@60Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, 1672, 1904, 0, 900, 903, 909, 934, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@75Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536, 1688, 1936, 0, 900, 903, 909, 942, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@85Hz */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, 1696, 1952, 0, 900, 903, 909, 948, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1440x900@120Hz RB */ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488, 1520, 1600, 0, 900, 903, 909, 953, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1600x1200@60Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@65Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@70Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@75Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@85Hz */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1600x1200@120Hz RB */ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648, 1680, 1760, 0, 1200, 1203, 1207, 1271, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1680x1050@60Hz RB */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728, 1760, 1840, 0, 1050, 1053, 1059, 1080, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1680x1050@60Hz */ { DRM_MODE("1680x1050", 
DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@75Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800, 1976, 2272, 0, 1050, 1053, 1059, 1099, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@85Hz */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, 1984, 2288, 0, 1050, 1053, 1059, 1105, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1680x1050@120Hz RB */ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728, 1760, 1840, 0, 1050, 1053, 1059, 1112, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1792x1344@60Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1792x1344@75Hz */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, 2104, 2456, 0, 1344, 1345, 1348, 1417, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1792x1344@120Hz RB */ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840, 1872, 1952, 0, 1344, 1347, 1351, 1423, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1856x1392@60Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1856x1392@75Hz */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, 2208, 2560, 0, 1392, 1395, 1399, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1856x1392@120Hz RB */ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904, 1936, 2016, 0, 1392, 1395, 1399, 1474, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1200@60Hz RB */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968, 2000, 2080, 0, 1200, 1203, 1209, 1235, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1200@60Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 
2056, 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@75Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056, 2264, 2608, 0, 1200, 1203, 1209, 1255, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@85Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, 2272, 2624, 0, 1200, 1203, 1209, 1262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@120Hz RB */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968, 2000, 2080, 0, 1200, 1203, 1209, 1271, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1920x1440@60Hz */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1440@75Hz */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, 2288, 2640, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1440@120Hz RB */ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968, 2000, 2080, 0, 1440, 1443, 1447, 1525, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz RB */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608, 2640, 2720, 0, 1600, 1603, 1609, 1646, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@75HZ */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768, 3048, 3536, 0, 1600, 1603, 1609, 1672, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@85HZ */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 2560x1600@120Hz RB */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608, 2640, 2720, 0, 1600, 1603, 
1609, 1694, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, }; static const struct drm_display_mode edid_est_modes[] = { { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, 896, 1024, 0, 600, 601, 603, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, 720, 840, 0, 480, 481, 484, 500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, 768, 864, 0, 480, 483, 486, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, 846, 900, 0, 400, 421, 423, 449, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738, 846, 900, 0, 400, 412, 414, 449, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, 1136, 1312, 0, 768, 769, 772, 800, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, 1184, 1328, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 
65000, 1024, 1048, 1184, 1344, 0, 768, 771, 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, 1208, 1264, 0, 768, 768, 776, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, 928, 1152, 0, 624, 625, 628, 667, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, 896, 1056, 0, 600, 601, 604, 625, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, 976, 1040, 0, 600, 637, 643, 666, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ }; struct minimode { short w; short h; short r; short rb; }; static const struct minimode est3_modes[] = { /* byte 6 */ { 640, 350, 85, 0 }, { 640, 400, 85, 0 }, { 720, 400, 85, 0 }, { 640, 480, 85, 0 }, { 848, 480, 60, 0 }, { 800, 600, 85, 0 }, { 1024, 768, 85, 0 }, { 1152, 864, 75, 0 }, /* byte 7 */ { 1280, 768, 60, 1 }, { 1280, 768, 60, 0 }, { 1280, 768, 75, 0 }, { 1280, 768, 85, 0 }, { 1280, 960, 60, 0 }, { 1280, 960, 85, 0 }, { 1280, 1024, 60, 0 }, { 1280, 1024, 85, 0 }, /* byte 8 */ { 1360, 768, 60, 0 }, { 1440, 900, 60, 1 }, { 1440, 900, 60, 0 }, { 1440, 900, 75, 0 }, { 1440, 900, 85, 0 }, { 1400, 1050, 60, 1 }, { 1400, 1050, 60, 0 }, { 1400, 1050, 75, 0 }, /* byte 9 */ { 1400, 1050, 85, 0 }, { 1680, 1050, 60, 1 }, { 1680, 1050, 60, 0 }, { 1680, 1050, 75, 0 }, { 1680, 1050, 85, 0 }, { 1600, 1200, 60, 0 }, { 1600, 1200, 65, 0 }, { 1600, 1200, 70, 0 }, /* byte 10 */ { 1600, 1200, 75, 0 }, { 1600, 1200, 85, 0 }, { 1792, 1344, 60, 0 }, { 1792, 1344, 85, 0 }, { 1856, 1392, 60, 0 }, { 1856, 1392, 
75, 0 }, { 1920, 1200, 60, 1 }, { 1920, 1200, 60, 0 }, /* byte 11 */ { 1920, 1200, 75, 0 }, { 1920, 1200, 85, 0 }, { 1920, 1440, 60, 0 }, { 1920, 1440, 75, 0 }, }; static const struct minimode extra_modes[] = { { 1024, 576, 60, 0 }, { 1366, 768, 60, 0 }, { 1600, 900, 60, 0 }, { 1680, 945, 60, 0 }, { 1920, 1080, 60, 0 }, { 2048, 1152, 60, 0 }, { 2048, 1536, 60, 0 }, }; /* * Probably taken from CEA-861 spec. * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. */ static const struct drm_display_mode edid_cea_modes[] = { /* 1 - 640x480@60Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 2 - 720x480@60Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 3 - 720x480@60Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 4 - 1280x720@60Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 60, }, /* 5 - 1920x1080i@60Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 60, }, /* 6 - 1440x480i@60Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, /* 7 - 1440x480i@60Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, 
/* 8 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, /* 9 - 1440x240@60Hz */ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 1602, 1716, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 60, }, /* 10 - 2880x480i@60Hz */ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 60, }, /* 11 - 2880x480i@60Hz */ { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 60, }, /* 12 - 2880x240@60Hz */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 13 - 2880x240@60Hz */ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 3204, 3432, 0, 240, 244, 247, 262, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 14 - 1440x480@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 15 - 1440x480@60Hz */ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, 1596, 1716, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 16 - 1920x1080@60Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 60, }, /* 17 - 720x576@50Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | 
DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 18 - 720x576@50Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 19 - 1280x720@50Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 50, }, /* 20 - 1920x1080i@50Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, }, /* 21 - 1440x576i@50Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 22 - 1440x576i@50Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 23 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 24 - 1440x288@50Hz */ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 1590, 1728, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 50, }, /* 25 - 2880x576i@50Hz */ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, }, /* 26 - 2880x576i@50Hz */ { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, 
}, /* 27 - 2880x288@50Hz */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 28 - 2880x288@50Hz */ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 3180, 3456, 0, 288, 290, 293, 312, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 29 - 1440x576@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 30 - 1440x576@50Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1592, 1728, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 31 - 1920x1080@50Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 50, }, /* 32 - 1920x1080@24Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 24, }, /* 33 - 1920x1080@25Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 25, }, /* 34 - 1920x1080@30Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 30, }, /* 35 - 2880x480@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 36 - 2880x480@60Hz */ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, 3192, 3432, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 60, }, /* 37 - 2880x576@50Hz */ { DRM_MODE("2880x576", 
DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 38 - 2880x576@50Hz */ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, 3184, 3456, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 50, }, /* 39 - 1920x1080i@50Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 50, }, /* 40 - 1920x1080i@100Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE), .vrefresh = 100, }, /* 41 - 1280x720@100Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, 1760, 1980, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 100, }, /* 42 - 720x576@100Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 100, }, /* 43 - 720x576@100Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 100, }, /* 44 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 100, }, /* 45 - 1440x576i@100Hz */ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_DBLCLK), .vrefresh = 100, }, /* 46 - 1920x1080i@120Hz */ { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 
DRM_MODE_FLAG_INTERLACE), .vrefresh = 120, }, /* 47 - 1280x720@120Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, 1430, 1650, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 120, }, /* 48 - 720x480@120Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 120, }, /* 49 - 720x480@120Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 120, }, /* 50 - 1440x480i@120Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 120, }, /* 51 - 1440x480i@120Hz */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 120, }, /* 52 - 720x576@200Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 200, }, /* 53 - 720x576@200Hz */ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, 796, 864, 0, 576, 581, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 200, }, /* 54 - 1440x576i@200Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 200, }, /* 55 - 1440x576i@200Hz */ { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 1590, 1728, 0, 576, 580, 586, 625, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 200, }, /* 56 - 720x480@240Hz 
*/ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 240, }, /* 57 - 720x480@240Hz */ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, 798, 858, 0, 480, 489, 495, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), .vrefresh = 240, }, /* 58 - 1440x480i@240 */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 240, }, /* 59 - 1440x480i@240 */ { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 1602, 1716, 0, 480, 488, 494, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK), .vrefresh = 240, }, /* 60 - 1280x720@24Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 24, }, /* 61 - 1280x720@25Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, 3740, 3960, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 25, }, /* 62 - 1280x720@30Hz */ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, 3080, 3300, 0, 720, 725, 730, 750, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 30, }, /* 63 - 1920x1080@120Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 120, }, /* 64 - 1920x1080@100Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 100, }, }; /*** DDC fetch and block validation ***/ static const u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; /* * Sanity check the header of the base EDID 
block. Return 8 if the header
 * is perfect, down to 0 if it's totally wrong. */
int drm_edid_header_is_valid(const u8 *raw_edid)
{
	int i, score = 0;

	/* Count how many of the 8 magic bytes match the canonical header. */
	for (i = 0; i < sizeof(edid_header); i++)
		if (raw_edid[i] == edid_header[i])
			score++;

	return score;
}
EXPORT_SYMBOL(drm_edid_header_is_valid);

/*
 * Minimum number of matching header bytes (out of 8) required before the
 * header is repaired in place instead of the block being rejected.
 */
static int edid_fixup __read_mostly = 6;
module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup, "Minimum number of valid EDID header bytes (0-8, default 6)");

/*
 * Sanity check the EDID block (base or extension).  Return 0 if the block
 * doesn't check out, or 1 if it's valid.
 */
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
	int i;
	u8 csum = 0;
	struct edid *edid = (struct edid *)raw_edid;

	/* Clamp an out-of-range module parameter back to its default. */
	if (edid_fixup > 8 || edid_fixup < 0)
		edid_fixup = 6;

	if (block == 0) {
		int score = drm_edid_header_is_valid(raw_edid);
		if (score == 8) ;
		else if (score >= edid_fixup) {
			/* Close enough: rewrite the header in place. */
			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
			memcpy(raw_edid, edid_header, sizeof(edid_header));
		} else {
			goto bad;
		}
	}

	/* The byte sum of the whole 128-byte block must be 0 mod 256. */
	for (i = 0; i < EDID_LENGTH; i++)
		csum += raw_edid[i];
	if (csum) {
		if (print_bad_edid) {
			DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
		}

		/* allow CEA to slide through, switches mangle this */
		if (raw_edid[0] != 0x02)
			goto bad;
	}

	/* per-block-type checks */
	switch (raw_edid[0]) {
	case 0: /* base */
		if (edid->version != 1) {
			DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
			goto bad;
		}

		if (edid->revision > 4)
			DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
		break;

	default:
		break;
	}

	return 1;

bad:
	if (raw_edid && print_bad_edid) {
		printk(KERN_ERR "Raw EDID:\n");
		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
			       raw_edid, EDID_LENGTH, false);
	}
	return 0;
}
EXPORT_SYMBOL(drm_edid_block_valid);

/**
 * drm_edid_is_valid - sanity check EDID data
 * @edid: EDID data
 *
 * Sanity-check an entire EDID record (including extensions)
 */
bool drm_edid_is_valid(struct edid *edid)
{
	int i;
	u8 *raw = (u8 *)edid;

	if (!edid)
		return false;

	/* Validate the base block and every advertised extension block. */
	for (i = 0; i <= edid->extensions; i++)
		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
			return false;

	return true;
}
EXPORT_SYMBOL(drm_edid_is_valid);

#define DDC_SEGMENT_ADDR 0x30
/**
 * Get EDID information via I2C.
 *
 * \param adapter : i2c device adaptor
 * \param buf     : EDID data buffer to be filled
 * \param block   : EDID block number to fetch
 * \param len     : EDID data buffer length
 * \return 0 on success or -1 on failure.
 *
 * Try to fetch EDID information by calling i2c driver function.
 */
static int
drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
		      int block, int len)
{
	unsigned char start = block * EDID_LENGTH;
	unsigned char segment = block >> 1;
	/* Odd-segment reads need an extra E-DDC segment-pointer write. */
	unsigned char xfers = segment ? 3 : 2;
	int ret, retries = 5;

	/* The core i2c driver will automatically retry the transfer if the
	 * adapter reports EAGAIN. However, we find that bit-banging transfers
	 * are susceptible to errors under a heavily loaded machine and
	 * generate spurious NAKs and timeouts. Retrying the transfer
	 * of the individual block a few times seems to overcome this.
	 */
	do {
		struct i2c_msg msgs[] = {
			{
				.addr	= DDC_SEGMENT_ADDR,
				.flags	= 0,
				.len	= 1,
				.buf	= &segment,
			}, {
				.addr	= DDC_ADDR,
				.flags	= 0,
				.len	= 1,
				.buf	= &start,
			}, {
				.addr	= DDC_ADDR,
				.flags	= I2C_M_RD,
				.len	= len,
				.buf	= buf,
			}
		};

		/*
		 * Avoid sending the segment addr to not upset non-compliant ddc
		 * monitors.
		 */
		ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);

		if (ret == -ENXIO) {
			DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
				      adapter->name);
			break;
		}
	} while (ret != xfers && --retries);

	return ret == xfers ? 0 : -1;
}

/* True iff every byte of the fetched block is zero. */
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
	if (memchr_inv(in_edid, 0, length))
		return false;

	return true;
}

/*
 * Fetch the base EDID block plus any extension blocks over DDC, retrying
 * each block up to four times.  Returns a kmalloc'd buffer containing the
 * base block and all *valid* extensions (invalid ones are dropped and the
 * extension count/checksum are patched to match), or NULL on failure.
 */
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	int i, j = 0, valid_extensions = 0;
	u8 *block, *new;
	bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);

	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
		return NULL;

	/* base block fetch */
	for (i = 0; i < 4; i++) {
		if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
			goto out;
		if (drm_edid_block_valid(block, 0, print_bad_edid))
			break;
		if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
			connector->null_edid_counter++;
			goto carp;
		}
	}
	if (i == 4)
		goto carp;

	/* if there's no extensions, we're done */
	if (block[0x7e] == 0)
		return block;

	/* Grow the buffer to hold the advertised extension blocks. */
	new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
	if (!new)
		goto out;
	block = new;

	for (j = 1; j <= block[0x7e]; j++) {
		for (i = 0; i < 4; i++) {
			if (drm_do_probe_ddc_edid(adapter,
				  block + (valid_extensions + 1) * EDID_LENGTH,
				  j, EDID_LENGTH))
				goto out;
			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
				valid_extensions++;
				break;
			}
		}
		if (i == 4 && print_bad_edid) {
			dev_warn(connector->dev->dev,
				 "%s: Ignoring invalid EDID block %d.\n",
				 drm_get_connector_name(connector), j);

			connector->bad_edid_counter++;
		}
	}

	if (valid_extensions != block[0x7e]) {
		/* Patch checksum and extension count to what was kept. */
		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
		block[0x7e] = valid_extensions;
		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
		if (!new)
			goto out;
		block = new;
	}

	return block;

carp:
	if (print_bad_edid) {
		dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
			 drm_get_connector_name(connector), j);
	}
	connector->bad_edid_counter++;

out:
	kfree(block);
	return NULL;
}

/**
 * Probe DDC presence.
* * \param adapter : i2c device adaptor * \return 1 on success */ bool drm_probe_ddc(struct i2c_adapter *adapter) { unsigned char out; return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0); } EXPORT_SYMBOL(drm_probe_ddc); /** * drm_get_edid - get EDID data, if available * @connector: connector we're probing * @adapter: i2c adapter to use for DDC * * Poke the given i2c channel to grab EDID data if possible. If found, * attach it to the connector. * * Return edid data or NULL if we couldn't find any. */ struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid = NULL; if (drm_probe_ddc(adapter)) edid = (struct edid *)drm_do_get_edid(connector, adapter); return edid; } EXPORT_SYMBOL(drm_get_edid); /*** EDID parsing ***/ /** * edid_vendor - match a string against EDID's obfuscated vendor field * @edid: EDID to match * @vendor: vendor string * * Returns true if @vendor is in @edid, false otherwise */ static bool edid_vendor(struct edid *edid, char *vendor) { char edid_vendor[3]; edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@'; edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) | ((edid->mfg_id[1] & 0xe0) >> 5)) + '@'; edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@'; return !strncmp(edid_vendor, vendor, 3); } /** * edid_get_quirks - return quirk flags for a given EDID * @edid: EDID to process * * This tells subsequent routines what fixes they need to apply. 
*/ static u32 edid_get_quirks(struct edid *edid) { struct edid_quirk *quirk; int i; for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) { quirk = &edid_quirk_list[i]; if (edid_vendor(edid, quirk->vendor) && (EDID_PRODUCT_ID(edid) == quirk->product_id)) return quirk->quirks; } return 0; } #define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay) #define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh)) /** * edid_fixup_preferred - set preferred modes based on quirk list * @connector: has mode list to fix up * @quirks: quirks list * * Walk the mode list for @connector, clearing the preferred status * on existing modes and setting it anew for the right mode ala @quirks. */ static void edid_fixup_preferred(struct drm_connector *connector, u32 quirks) { struct drm_display_mode *t, *cur_mode, *preferred_mode; int target_refresh = 0; if (list_empty(&connector->probed_modes)) return; if (quirks & EDID_QUIRK_PREFER_LARGE_60) target_refresh = 60; if (quirks & EDID_QUIRK_PREFER_LARGE_75) target_refresh = 75; preferred_mode = list_first_entry(&connector->probed_modes, struct drm_display_mode, head); list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) { cur_mode->type &= ~DRM_MODE_TYPE_PREFERRED; if (cur_mode == preferred_mode) continue; /* Largest mode is preferred */ if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode)) preferred_mode = cur_mode; /* At a given size, try to get closest to target refresh */ if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) && MODE_REFRESH_DIFF(cur_mode, target_refresh) < MODE_REFRESH_DIFF(preferred_mode, target_refresh)) { preferred_mode = cur_mode; } } preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; } static bool mode_is_rb(const struct drm_display_mode *mode) { return (mode->htotal - mode->hdisplay == 160) && (mode->hsync_end - mode->hdisplay == 80) && (mode->hsync_end - mode->hsync_start == 32) && (mode->vsync_start - mode->vdisplay == 3); } /* * drm_mode_find_dmt - Create a copy of a mode if present in DMT * @dev: 
Device to duplicate against * @hsize: Mode width * @vsize: Mode height * @fresh: Mode refresh rate * @rb: Mode reduced-blanking-ness * * Walk the DMT mode list looking for a match for the given parameters. * Return a newly allocated copy of the mode, or NULL if not found. */ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh, bool rb) { int i; for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) { const struct drm_display_mode *ptr = &drm_dmt_modes[i]; if (hsize != ptr->hdisplay) continue; if (vsize != ptr->vdisplay) continue; if (fresh != drm_mode_vrefresh(ptr)) continue; if (rb != mode_is_rb(ptr)) continue; return drm_mode_duplicate(dev, ptr); } return NULL; } EXPORT_SYMBOL(drm_mode_find_dmt); typedef void detailed_cb(struct detailed_timing *timing, void *closure); static void cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) { int i, n = 0; u8 d = ext[0x02]; u8 *det_base = ext + d; n = (127 - d) / 18; for (i = 0; i < n; i++) cb((struct detailed_timing *)(det_base + 18 * i), closure); } static void vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure) { unsigned int i, n = min((int)ext[0x02], 6); u8 *det_base = ext + 5; if (ext[0x01] != 1) return; /* unknown version */ for (i = 0; i < n; i++) cb((struct detailed_timing *)(det_base + 18 * i), closure); } static void drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure) { int i; struct edid *edid = (struct edid *)raw_edid; if (edid == NULL) return; for (i = 0; i < EDID_DETAILED_TIMINGS; i++) cb(&(edid->detailed_timings[i]), closure); for (i = 1; i <= raw_edid[0x7e]; i++) { u8 *ext = raw_edid + (i * EDID_LENGTH); switch (*ext) { case CEA_EXT: cea_for_each_detailed_block(ext, cb, closure); break; case VTB_EXT: vtb_for_each_detailed_block(ext, cb, closure); break; default: break; } } } static void is_rb(struct detailed_timing *t, void *data) { u8 *r = (u8 *)t; if (r[3] == EDID_DETAIL_MONITOR_RANGE) if (r[15] & 0x10) *(bool 
*)data = true; } /* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */ static bool drm_monitor_supports_rb(struct edid *edid) { if (edid->revision >= 4) { bool ret = false; drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); return ret; } return ((edid->input & DRM_EDID_INPUT_DIGITAL) != 0); } static void find_gtf2(struct detailed_timing *t, void *data) { u8 *r = (u8 *)t; if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02) *(u8 **)data = r; } /* Secondary GTF curve kicks in above some break frequency */ static int drm_gtf2_hbreak(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? (r[12] * 2) : 0; } static int drm_gtf2_2c(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? r[13] : 0; } static int drm_gtf2_m(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? (r[15] << 8) + r[14] : 0; } static int drm_gtf2_k(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? r[16] : 0; } static int drm_gtf2_2j(struct edid *edid) { u8 *r = NULL; drm_for_each_detailed_block((u8 *)edid, find_gtf2, &r); return r ? r[17] : 0; } /** * standard_timing_level - get std. timing level(CVT/GTF/DMT) * @edid: EDID block to scan */ static int standard_timing_level(struct edid *edid) { if (edid->revision >= 2) { if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) return LEVEL_CVT; if (drm_gtf2_hbreak(edid)) return LEVEL_GTF2; return LEVEL_GTF; } return LEVEL_DMT; } /* * 0 is reserved. The spec says 0x01 fill for unused timings. Some old * monitors fill with ascii space (0x20) instead. 
*/ static int bad_std_timing(u8 a, u8 b) { return (a == 0x00 && b == 0x00) || (a == 0x01 && b == 0x01) || (a == 0x20 && b == 0x20); } /** * drm_mode_std - convert standard mode info (width, height, refresh) into mode * @t: standard timing params * @timing_level: standard timing level * * Take the standard timing params (in this case width, aspect, and refresh) * and convert them into a real mode using CVT/GTF/DMT. */ static struct drm_display_mode * drm_mode_std(struct drm_connector *connector, struct edid *edid, struct std_timing *t, int revision) { struct drm_device *dev = connector->dev; struct drm_display_mode *m, *mode = NULL; int hsize, vsize; int vrefresh_rate; unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) >> EDID_TIMING_ASPECT_SHIFT; unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) >> EDID_TIMING_VFREQ_SHIFT; int timing_level = standard_timing_level(edid); if (bad_std_timing(t->hsize, t->vfreq_aspect)) return NULL; /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ hsize = t->hsize * 8 + 248; /* vrefresh_rate = vfreq + 60 */ vrefresh_rate = vfreq + 60; /* the vdisplay is calculated based on the aspect ratio */ if (aspect_ratio == 0) { if (revision < 3) vsize = hsize; else vsize = (hsize * 10) / 16; } else if (aspect_ratio == 1) vsize = (hsize * 3) / 4; else if (aspect_ratio == 2) vsize = (hsize * 4) / 5; else vsize = (hsize * 9) / 16; /* HDTV hack, part 1 */ if (vrefresh_rate == 60 && ((hsize == 1360 && vsize == 765) || (hsize == 1368 && vsize == 769))) { hsize = 1366; vsize = 768; } /* * If this connector already has a mode for this size and refresh * rate (because it came from detailed or CVT info), use that * instead. This way we don't have to guess at interlace or * reduced blanking. 
*/ list_for_each_entry(m, &connector->probed_modes, head) if (m->hdisplay == hsize && m->vdisplay == vsize && drm_mode_vrefresh(m) == vrefresh_rate) return NULL; /* HDTV hack, part 2 */ if (hsize == 1366 && vsize == 768 && vrefresh_rate == 60) { mode = drm_cvt_mode(dev, 1366, 768, vrefresh_rate, 0, 0, false); mode->hdisplay = 1366; mode->hsync_start = mode->hsync_start - 1; mode->hsync_end = mode->hsync_end - 1; return mode; } /* check whether it can be found in default mode table */ if (drm_monitor_supports_rb(edid)) { mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, true); if (mode) return mode; } mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false); if (mode) return mode; /* okay, generate it */ switch (timing_level) { case LEVEL_DMT: break; case LEVEL_GTF: mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); break; case LEVEL_GTF2: /* * This is potentially wrong if there's ever a monitor with * more than one ranges section, each claiming a different * secondary GTF curve. Please don't do that. */ mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); if (!mode) return NULL; if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) { drm_mode_destroy(dev, mode); mode = drm_gtf_mode_complex(dev, hsize, vsize, vrefresh_rate, 0, 0, drm_gtf2_m(edid), drm_gtf2_2c(edid), drm_gtf2_k(edid), drm_gtf2_2j(edid)); } break; case LEVEL_CVT: mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0, false); break; } return mode; } /* * EDID is delightfully ambiguous about how interlaced modes are to be * encoded. Our internal representation is of frame height, but some * HDTV detailed timings are encoded as field height. * * The format list here is from CEA, in frame size. Technically we * should be checking refresh rate too. Whatever. 
 */
static void
drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
			    struct detailed_pixel_timing *pt)
{
	int i;
	static const struct {
		int w, h;
	} cea_interlaced[] = {
		{ 1920, 1080 },
		{ 720, 480 },
		{ 1440, 480 },
		{ 2880, 480 },
		{ 720, 576 },
		{ 1440, 576 },
		{ 2880, 576 },
	};

	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
		return;

	/* Convert field height back to frame height for known CEA sizes. */
	for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
		if ((mode->hdisplay == cea_interlaced[i].w) &&
		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
			mode->vdisplay *= 2;
			mode->vsync_start *= 2;
			mode->vsync_end *= 2;
			mode->vtotal *= 2;
			mode->vtotal |= 1;
		}
	}

	mode->flags |= DRM_MODE_FLAG_INTERLACE;
}

/**
 * drm_mode_detailed - create a new mode from an EDID detailed timing section
 * @dev: DRM device (needed to create new mode)
 * @edid: EDID block
 * @timing: EDID detailed timing info
 * @quirks: quirks to apply
 *
 * An EDID detailed timing block contains enough info for us to create and
 * return a new struct drm_display_mode.
 */
static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
						  struct edid *edid,
						  struct detailed_timing *timing,
						  u32 quirks)
{
	struct drm_display_mode *mode;
	struct detailed_pixel_timing *pt = &timing->data.pixel_data;
	/* Each timing value is a low byte plus packed high nibbles/bits. */
	unsigned hactive = (pt->hactive_hblank_hi & 0xf0) << 4 | pt->hactive_lo;
	unsigned vactive = (pt->vactive_vblank_hi & 0xf0) << 4 | pt->vactive_lo;
	unsigned hblank = (pt->hactive_hblank_hi & 0xf) << 8 | pt->hblank_lo;
	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);

	/* ignore tiny modes */
	if (hactive < 64 || vactive < 64)
		return NULL;

	if (pt->misc & DRM_EDID_PT_STEREO) {
		printk(KERN_WARNING "stereo mode not supported\n");
		return NULL;
	}
	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
		printk(KERN_WARNING "composite sync not supported\n");
	}

	/* it is incorrect if hsync/vsync width is zero */
	if (!hsync_pulse_width || !vsync_pulse_width) {
		DRM_DEBUG_KMS("Incorrect Detailed timing. "
				"Wrong Hsync/Vsync pulse width\n");
		return NULL;
	}

	if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
		/* Ignore the raw timing entirely; regenerate with CVT-RB. */
		mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
		if (!mode)
			return NULL;

		goto set_size;
	}

	mode = drm_mode_create(dev);
	if (!mode)
		return NULL;

	if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
		timing->pixel_clock = cpu_to_le16(1088);

	mode->clock = le16_to_cpu(timing->pixel_clock) * 10;

	mode->hdisplay = hactive;
	mode->hsync_start = mode->hdisplay + hsync_offset;
	mode->hsync_end = mode->hsync_start + hsync_pulse_width;
	mode->htotal = mode->hdisplay + hblank;

	mode->vdisplay = vactive;
	mode->vsync_start = mode->vdisplay + vsync_offset;
	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
	mode->vtotal = mode->vdisplay + vblank;

	/* Some EDIDs have bogus h/vtotal values */
	if (mode->hsync_end > mode->htotal)
		mode->htotal = mode->hsync_end + 1;
	if (mode->vsync_end > mode->vtotal)
		mode->vtotal = mode->vsync_end + 1;

	drm_mode_do_interlace_quirk(mode, pt);

	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
	}

	mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
		DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
	mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
		DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;

set_size:
	mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
	mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;

	if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
		mode->width_mm *= 10;
		mode->height_mm *= 10;
	}

	if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
		mode->width_mm = edid->width_cm * 10;
		mode->height_mm = edid->height_cm * 10;
	}

	mode->type = DRM_MODE_TYPE_DRIVER;
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);

	return mode;
}

/*
 * Check a mode's horizontal sync rate against a monitor range descriptor
 * @t; EDID 1.4 adds a +255 extension bit for each limit in byte 4.
 */
static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
		    struct edid *edid, u8 *t)
{
	int hsync, hmin, hmax;

	hmin = t[7];
	if (edid->revision >= 4)
	    hmin += ((t[4] & 0x04) ? 255 : 0);
	hmax = t[8];
	if (edid->revision >= 4)
	    hmax += ((t[4] & 0x08) ? 255 : 0);
	hsync = drm_mode_hsync(mode);

	return (hsync <= hmax && hsync >= hmin);
}

static bool
mode_in_vsync_range(const struct drm_display_mode *mode,
		    struct edid *edid, u8 *t)
{
	int vsync, vmin, vmax;

	vmin = t[5];
	if (edid->revision >= 4)
	    vmin += ((t[4] & 0x01) ? 255 : 0);
	vmax = t[6];
	if (edid->revision >= 4)
	    vmax += ((t[4] & 0x02) ?
255 : 0); vsync = drm_mode_vrefresh(mode); return (vsync <= vmax && vsync >= vmin); } static u32 range_pixel_clock(struct edid *edid, u8 *t) { /* unspecified */ if (t[9] == 0 || t[9] == 255) return 0; /* 1.4 with CVT support gives us real precision, yay */ if (edid->revision >= 4 && t[10] == 0x04) return (t[9] * 10000) - ((t[12] >> 2) * 250); /* 1.3 is pathetic, so fuzz up a bit */ return t[9] * 10000 + 5001; } static bool mode_in_range(const struct drm_display_mode *mode, struct edid *edid, struct detailed_timing *timing) { u32 max_clock; u8 *t = (u8 *)timing; if (!mode_in_hsync_range(mode, edid, t)) return false; if (!mode_in_vsync_range(mode, edid, t)) return false; if ((max_clock = range_pixel_clock(edid, t))) if (mode->clock > max_clock) return false; /* 1.4 max horizontal check */ if (edid->revision >= 4 && t[10] == 0x04) if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3)))) return false; if (mode_is_rb(mode) && !drm_monitor_supports_rb(edid)) return false; return true; } static bool valid_inferred_mode(const struct drm_connector *connector, const struct drm_display_mode *mode) { struct drm_display_mode *m; bool ok = false; list_for_each_entry(m, &connector->probed_modes, head) { if (mode->hdisplay == m->hdisplay && mode->vdisplay == m->vdisplay && drm_mode_vrefresh(mode) == drm_mode_vrefresh(m)) return false; /* duplicated */ if (mode->hdisplay <= m->hdisplay && mode->vdisplay <= m->vdisplay) ok = true; } return ok; } static int drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) { int i, modes = 0; struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) { if (mode_in_range(drm_dmt_modes + i, edid, timing) && valid_inferred_mode(connector, drm_dmt_modes + i)) { newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; } } } return modes; } /* fix up 1366x768 mode 
from 1368x768; * GFT/CVT can't express 1366 width which isn't dividable by 8 */ static void fixup_mode_1366x768(struct drm_display_mode *mode) { if (mode->hdisplay == 1368 && mode->vdisplay == 768) { mode->hdisplay = 1366; mode->hsync_start--; mode->hsync_end--; drm_mode_set_name(mode); } } static int drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) { int i, modes = 0; struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; for (i = 0; i < ARRAY_SIZE(extra_modes); i++) { const struct minimode *m = &extra_modes[i]; newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); if (!newmode) return modes; fixup_mode_1366x768(newmode); if (!mode_in_range(newmode, edid, timing) || !valid_inferred_mode(connector, newmode)) { drm_mode_destroy(dev, newmode); continue; } drm_mode_probed_add(connector, newmode); modes++; } return modes; } static int drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) { int i, modes = 0; struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; bool rb = drm_monitor_supports_rb(edid); for (i = 0; i < ARRAY_SIZE(extra_modes); i++) { const struct minimode *m = &extra_modes[i]; newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); if (!newmode) return modes; fixup_mode_1366x768(newmode); if (!mode_in_range(newmode, edid, timing) || !valid_inferred_mode(connector, newmode)) { drm_mode_destroy(dev, newmode); continue; } drm_mode_probed_add(connector, newmode); modes++; } return modes; } static void do_inferred_modes(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; struct detailed_data_monitor_range *range = &data->data.range; if (data->type != EDID_DETAIL_MONITOR_RANGE) return; closure->modes += drm_dmt_modes_for_range(closure->connector, closure->edid, timing); if (!version_greater(closure->edid, 1, 1)) 
return; /* GTF not defined yet */ switch (range->flags) { case 0x02: /* secondary gtf, XXX could do more */ case 0x00: /* default gtf */ closure->modes += drm_gtf_modes_for_range(closure->connector, closure->edid, timing); break; case 0x04: /* cvt, only in 1.4+ */ if (!version_greater(closure->edid, 1, 3)) break; closure->modes += drm_cvt_modes_for_range(closure->connector, closure->edid, timing); break; case 0x01: /* just the ranges, no formula */ default: break; } } static int add_inferred_modes(struct drm_connector *connector, struct edid *edid) { struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; if (version_greater(edid, 1, 0)) drm_for_each_detailed_block((u8 *)edid, do_inferred_modes, &closure); return closure.modes; } static int drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) { int i, j, m, modes = 0; struct drm_display_mode *mode; u8 *est = ((u8 *)timing) + 5; for (i = 0; i < 6; i++) { for (j = 7; j > 0; j--) { m = (i * 8) + (7 - j); if (m >= ARRAY_SIZE(est3_modes)) break; if (est[i] & (1 << j)) { mode = drm_mode_find_dmt(connector->dev, est3_modes[m].w, est3_modes[m].h, est3_modes[m].r, est3_modes[m].rb); if (mode) { drm_mode_probed_add(connector, mode); modes++; } } } } return modes; } static void do_established_modes(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; if (data->type == EDID_DETAIL_EST_TIMINGS) closure->modes += drm_est3_modes(closure->connector, timing); } /** * add_established_modes - get est. modes from EDID and add them * @edid: EDID block to scan * * Each EDID block contains a bitmap of the supported "established modes" list * (defined above). Tease them out and add them to the global modes list. 
*/ static int add_established_modes(struct drm_connector *connector, struct edid *edid) { struct drm_device *dev = connector->dev; unsigned long est_bits = edid->established_timings.t1 | (edid->established_timings.t2 << 8) | ((edid->established_timings.mfg_rsvd & 0x80) << 9); int i, modes = 0; struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; for (i = 0; i <= EDID_EST_TIMINGS; i++) { if (est_bits & (1<<i)) { struct drm_display_mode *newmode; newmode = drm_mode_duplicate(dev, &edid_est_modes[i]); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; } } } if (version_greater(edid, 1, 0)) drm_for_each_detailed_block((u8 *)edid, do_established_modes, &closure); return modes + closure.modes; } static void do_standard_modes(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; struct drm_connector *connector = closure->connector; struct edid *edid = closure->edid; if (data->type == EDID_DETAIL_STD_MODES) { int i; for (i = 0; i < 6; i++) { struct std_timing *std; struct drm_display_mode *newmode; std = &data->data.timings[i]; newmode = drm_mode_std(connector, edid, std, edid->revision); if (newmode) { drm_mode_probed_add(connector, newmode); closure->modes++; } } } } /** * add_standard_modes - get std. modes from EDID and add them * @edid: EDID block to scan * * Standard modes can be calculated using the appropriate standard (DMT, * GTF or CVT. Grab them from @edid and add them to the list. 
*/ static int add_standard_modes(struct drm_connector *connector, struct edid *edid) { int i, modes = 0; struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; for (i = 0; i < EDID_STD_TIMINGS; i++) { struct drm_display_mode *newmode; newmode = drm_mode_std(connector, edid, &edid->standard_timings[i], edid->revision); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; } } if (version_greater(edid, 1, 0)) drm_for_each_detailed_block((u8 *)edid, do_standard_modes, &closure); /* XXX should also look for standard codes in VTB blocks */ return modes + closure.modes; } static int drm_cvt_modes(struct drm_connector *connector, struct detailed_timing *timing) { int i, j, modes = 0; struct drm_display_mode *newmode; struct drm_device *dev = connector->dev; struct cvt_timing *cvt; const int rates[] = { 60, 85, 75, 60, 50 }; const u8 empty[3] = { 0, 0, 0 }; for (i = 0; i < 4; i++) { int uninitialized_var(width), height; cvt = &(timing->data.other_data.data.cvt[i]); if (!memcmp(cvt->code, empty, 3)) continue; height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2; switch (cvt->code[1] & 0x0c) { case 0x00: width = height * 4 / 3; break; case 0x04: width = height * 16 / 9; break; case 0x08: width = height * 16 / 10; break; case 0x0c: width = height * 15 / 9; break; } for (j = 1; j < 5; j++) { if (cvt->code[2] & (1 << j)) { newmode = drm_cvt_mode(dev, width, height, rates[j], j == 0, false, false); if (newmode) { drm_mode_probed_add(connector, newmode); modes++; } } } } return modes; } static void do_cvt_mode(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct detailed_non_pixel *data = &timing->data.other_data; if (data->type == EDID_DETAIL_CVT_3BYTE) closure->modes += drm_cvt_modes(closure->connector, timing); } static int add_cvt_modes(struct drm_connector *connector, struct edid *edid) { struct detailed_mode_closure closure = { connector, edid, 0, 0, 0 }; if (version_greater(edid, 1, 2)) 
drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure); /* XXX should also look for CVT codes in VTB blocks */ return closure.modes; } static void do_detailed_mode(struct detailed_timing *timing, void *c) { struct detailed_mode_closure *closure = c; struct drm_display_mode *newmode; if (timing->pixel_clock) { newmode = drm_mode_detailed(closure->connector->dev, closure->edid, timing, closure->quirks); if (!newmode) return; if (closure->preferred) newmode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(closure->connector, newmode); closure->modes++; closure->preferred = 0; } } /* * add_detailed_modes - Add modes from detailed timings * @connector: attached connector * @edid: EDID block to scan * @quirks: quirks to apply */ static int add_detailed_modes(struct drm_connector *connector, struct edid *edid, u32 quirks) { struct detailed_mode_closure closure = { connector, edid, 1, quirks, 0 }; if (closure.preferred && !version_greater(edid, 1, 3)) closure.preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure); return closure.modes; } #define HDMI_IDENTIFIER 0x000C03 #define AUDIO_BLOCK 0x01 #define VIDEO_BLOCK 0x02 #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 #define VIDEO_CAPABILITY_BLOCK 0x07 #define EDID_BASIC_AUDIO (1 << 6) #define EDID_CEA_YCRCB444 (1 << 5) #define EDID_CEA_YCRCB422 (1 << 4) #define EDID_CEA_VCDB_QS (1 << 6) /** * Search EDID for CEA extension block. 
*/ u8 *drm_find_cea_extension(struct edid *edid) { u8 *edid_ext = NULL; int i; /* No EDID or EDID extensions */ if (edid == NULL || edid->extensions == 0) return NULL; /* Find CEA extension */ for (i = 0; i < edid->extensions; i++) { edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); if (edid_ext[0] == CEA_EXT) break; } if (i == edid->extensions) return NULL; return edid_ext; } EXPORT_SYMBOL(drm_find_cea_extension); /** * drm_match_cea_mode - look for a CEA mode matching given mode * @to_match: display mode * * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861 * mode. */ u8 drm_match_cea_mode(const struct drm_display_mode *to_match) { u8 mode; if (!to_match->clock) return 0; for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; unsigned int clock1, clock2; clock1 = clock2 = cea_mode->clock; /* Check both 60Hz and 59.94Hz */ if (cea_mode->vrefresh % 6 == 0) { /* * edid_cea_modes contains the 59.94Hz * variant for 240 and 480 line modes, * and the 60Hz variant otherwise. 
*/ if (cea_mode->vdisplay == 240 || cea_mode->vdisplay == 480) clock1 = clock1 * 1001 / 1000; else clock2 = DIV_ROUND_UP(clock2 * 1000, 1001); } if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && drm_mode_equal_no_clocks(to_match, cea_mode)) return mode + 1; } return 0; } EXPORT_SYMBOL(drm_match_cea_mode); static int do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) { struct drm_device *dev = connector->dev; u8 * mode, cea_mode; int modes = 0; for (mode = db; mode < db + len; mode++) { cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */ if (cea_mode < ARRAY_SIZE(edid_cea_modes)) { struct drm_display_mode *newmode; newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); if (newmode) { newmode->vrefresh = 0; drm_mode_probed_add(connector, newmode); modes++; } } } return modes; } static int cea_db_payload_len(const u8 *db) { return db[0] & 0x1f; } static int cea_db_tag(const u8 *db) { return db[0] >> 5; } static int cea_revision(const u8 *cea) { return cea[1]; } static int cea_db_offsets(const u8 *cea, int *start, int *end) { /* Data block offset in CEA extension block */ *start = 4; *end = cea[2]; if (*end == 0) *end = 127; if (*end < 4 || *end > 127) return -ERANGE; return 0; } #define for_each_cea_db(cea, i, start, end) \ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) static int add_cea_modes(struct drm_connector *connector, struct edid *edid) { u8 * cea = drm_find_cea_extension(edid); u8 * db, dbl; int modes = 0; if (cea && cea_revision(cea) >= 3) { int i, start, end; if (cea_db_offsets(cea, &start, &end)) return 0; for_each_cea_db(cea, i, start, end) { db = &cea[i]; dbl = cea_db_payload_len(db); if (cea_db_tag(db) == VIDEO_BLOCK) modes += do_cea_modes (connector, db+1, dbl); } } return modes; } static void parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db) { u8 len = 
cea_db_payload_len(db); if (len >= 6) { connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */ connector->dvi_dual = db[6] & 1; } if (len >= 7) connector->max_tmds_clock = db[7] * 5; if (len >= 8) { connector->latency_present[0] = db[8] >> 7; connector->latency_present[1] = (db[8] >> 6) & 1; } if (len >= 9) connector->video_latency[0] = db[9]; if (len >= 10) connector->audio_latency[0] = db[10]; if (len >= 11) connector->video_latency[1] = db[11]; if (len >= 12) connector->audio_latency[1] = db[12]; DRM_DEBUG_KMS("HDMI: DVI dual %d, " "max TMDS clock %d, " "latency present %d %d, " "video latency %d %d, " "audio latency %d %d\n", connector->dvi_dual, connector->max_tmds_clock, (int) connector->latency_present[0], (int) connector->latency_present[1], connector->video_latency[0], connector->video_latency[1], connector->audio_latency[0], connector->audio_latency[1]); } static void monitor_name(struct detailed_timing *t, void *data) { if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME) *(u8 **)data = t->data.other_data.data.str.str; } static bool cea_db_is_hdmi_vsdb(const u8 *db) { int hdmi_id; if (cea_db_tag(db) != VENDOR_BLOCK) return false; if (cea_db_payload_len(db) < 5) return false; hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); return hdmi_id == HDMI_IDENTIFIER; } /** * drm_edid_to_eld - build ELD from EDID * @connector: connector corresponding to the HDMI/DP sink * @edid: EDID to parse * * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. 
* Some ELD fields are left to the graphics driver caller: * - Conn_Type * - HDCP * - Port_ID */ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) { uint8_t *eld = connector->eld; u8 *cea; u8 *name; u8 *db; int sad_count = 0; int mnl; int dbl; memset(eld, 0, sizeof(connector->eld)); cea = drm_find_cea_extension(edid); if (!cea) { DRM_DEBUG_KMS("ELD: no CEA Extension found\n"); return; } name = NULL; drm_for_each_detailed_block((u8 *)edid, monitor_name, &name); for (mnl = 0; name && mnl < 13; mnl++) { if (name[mnl] == 0x0a) break; eld[20 + mnl] = name[mnl]; } eld[4] = (cea[1] << 5) | mnl; DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20); eld[0] = 2 << 3; /* ELD version: 2 */ eld[16] = edid->mfg_id[0]; eld[17] = edid->mfg_id[1]; eld[18] = edid->prod_code[0]; eld[19] = edid->prod_code[1]; if (cea_revision(cea) >= 3) { int i, start, end; if (cea_db_offsets(cea, &start, &end)) { start = 0; end = 0; } for_each_cea_db(cea, i, start, end) { db = &cea[i]; dbl = cea_db_payload_len(db); switch (cea_db_tag(db)) { case AUDIO_BLOCK: /* Audio Data Block, contains SADs */ sad_count = dbl / 3; if (dbl >= 1) memcpy(eld + 20 + mnl, &db[1], dbl); break; case SPEAKER_BLOCK: /* Speaker Allocation Data Block */ if (dbl >= 1) eld[7] = db[1]; break; case VENDOR_BLOCK: /* HDMI Vendor-Specific Data Block */ if (cea_db_is_hdmi_vsdb(db)) parse_hdmi_vsdb(connector, db); break; default: break; } } } eld[5] |= sad_count << 4; eld[2] = (20 + mnl + sad_count * 3 + 3) / 4; DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count); } EXPORT_SYMBOL(drm_edid_to_eld); /** * drm_edid_to_sad - extracts SADs from EDID * @edid: EDID to parse * @sads: pointer that will be set to the extracted SADs * * Looks for CEA EDID block and extracts SADs (Short Audio Descriptors) from it. * Note: returned pointer needs to be kfreed * * Return number of found SADs or negative number on error. 
*/ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads) { int count = 0; int i, start, end, dbl; u8 *cea; cea = drm_find_cea_extension(edid); if (!cea) { DRM_DEBUG_KMS("SAD: no CEA Extension found\n"); return -ENOENT; } if (cea_revision(cea) < 3) { DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); return -ENOTSUPP; } if (cea_db_offsets(cea, &start, &end)) { DRM_DEBUG_KMS("SAD: invalid data block offsets\n"); return -EPROTO; } for_each_cea_db(cea, i, start, end) { u8 *db = &cea[i]; if (cea_db_tag(db) == AUDIO_BLOCK) { int j; dbl = cea_db_payload_len(db); count = dbl / 3; /* SAD is 3B */ *sads = kcalloc(count, sizeof(**sads), GFP_KERNEL); if (!*sads) return -ENOMEM; for (j = 0; j < count; j++) { u8 *sad = &db[1 + j * 3]; (*sads)[j].format = (sad[0] & 0x78) >> 3; (*sads)[j].channels = sad[0] & 0x7; (*sads)[j].freq = sad[1] & 0x7F; (*sads)[j].byte2 = sad[2]; } break; } } return count; } EXPORT_SYMBOL(drm_edid_to_sad); /** * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond * @connector: connector associated with the HDMI/DP sink * @mode: the display mode */ int drm_av_sync_delay(struct drm_connector *connector, struct drm_display_mode *mode) { int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); int a, v; if (!connector->latency_present[0]) return 0; if (!connector->latency_present[1]) i = 0; a = connector->audio_latency[i]; v = connector->video_latency[i]; /* * HDMI/DP sink doesn't support audio or video? */ if (a == 255 || v == 255) return 0; /* * Convert raw EDID values to millisecond. * Treat unknown latency as 0ms. */ if (a) a = min(2 * (a - 1), 500); if (v) v = min(2 * (v - 1), 500); return max(v - a, 0); } EXPORT_SYMBOL(drm_av_sync_delay); /** * drm_select_eld - select one ELD from multiple HDMI/DP sinks * @encoder: the encoder just changed display mode * @mode: the adjusted display mode * * It's possible for one encoder to be associated with multiple HDMI/DP sinks. * The policy is now hard coded to simply use the first HDMI/DP sink's ELD. 
*/ struct drm_connector *drm_select_eld(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct drm_connector *connector; struct drm_device *dev = encoder->dev; list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder && connector->eld[0]) return connector; return NULL; } EXPORT_SYMBOL(drm_select_eld); /** * drm_detect_hdmi_monitor - detect whether monitor is hdmi. * @edid: monitor EDID information * * Parse the CEA extension according to CEA-861-B. * Return true if HDMI, false if not or unknown. */ bool drm_detect_hdmi_monitor(struct edid *edid) { u8 *edid_ext; int i; int start_offset, end_offset; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) return false; if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) return false; /* * Because HDMI identifier is in Vendor Specific Block, * search it from all data blocks of CEA extension. */ for_each_cea_db(edid_ext, i, start_offset, end_offset) { if (cea_db_is_hdmi_vsdb(&edid_ext[i])) return true; } return false; } EXPORT_SYMBOL(drm_detect_hdmi_monitor); /** * drm_detect_monitor_audio - check monitor audio capability * * Monitor should have CEA extension block. * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic * audio' only. If there is any audio extension block and supported * audio format, assume at least 'basic audio' support, even if 'basic * audio' is not defined in EDID. 
* */ bool drm_detect_monitor_audio(struct edid *edid) { u8 *edid_ext; int i, j; bool has_audio = false; int start_offset, end_offset; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) goto end; has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); if (has_audio) { DRM_DEBUG_KMS("Monitor has basic audio support\n"); goto end; } if (cea_db_offsets(edid_ext, &start_offset, &end_offset)) goto end; for_each_cea_db(edid_ext, i, start_offset, end_offset) { if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) { has_audio = true; for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3) DRM_DEBUG_KMS("CEA audio format %d\n", (edid_ext[i + j] >> 3) & 0xf); goto end; } } end: return has_audio; } EXPORT_SYMBOL(drm_detect_monitor_audio); /** * drm_rgb_quant_range_selectable - is RGB quantization range selectable? * * Check whether the monitor reports the RGB quantization range selection * as supported. The AVI infoframe can then be used to inform the monitor * which quantization range (full or limited) is used. */ bool drm_rgb_quant_range_selectable(struct edid *edid) { u8 *edid_ext; int i, start, end; edid_ext = drm_find_cea_extension(edid); if (!edid_ext) return false; if (cea_db_offsets(edid_ext, &start, &end)) return false; for_each_cea_db(edid_ext, i, start, end) { if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK && cea_db_payload_len(&edid_ext[i]) == 2) { DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]); return edid_ext[i + 2] & EDID_CEA_VCDB_QS; } } return false; } EXPORT_SYMBOL(drm_rgb_quant_range_selectable); /** * drm_add_display_info - pull display info out if present * @edid: EDID data * @info: display info (attached to connector) * * Grab any available display info and stuff it into the drm_display_info * structure that's part of the connector. Useful for tracking bpp and * color spaces. 
*/ static void drm_add_display_info(struct edid *edid, struct drm_display_info *info) { u8 *edid_ext; info->width_mm = edid->width_cm * 10; info->height_mm = edid->height_cm * 10; /* driver figures it out in this case */ info->bpc = 0; info->color_formats = 0; if (edid->revision < 3) return; if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return; /* Get data from CEA blocks if present */ edid_ext = drm_find_cea_extension(edid); if (edid_ext) { info->cea_rev = edid_ext[1]; /* The existence of a CEA block should imply RGB support */ info->color_formats = DRM_COLOR_FORMAT_RGB444; if (edid_ext[3] & EDID_CEA_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid_ext[3] & EDID_CEA_YCRCB422) info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; } /* Only defined for 1.4 with digital displays */ if (edid->revision < 4) return; switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { case DRM_EDID_DIGITAL_DEPTH_6: info->bpc = 6; break; case DRM_EDID_DIGITAL_DEPTH_8: info->bpc = 8; break; case DRM_EDID_DIGITAL_DEPTH_10: info->bpc = 10; break; case DRM_EDID_DIGITAL_DEPTH_12: info->bpc = 12; break; case DRM_EDID_DIGITAL_DEPTH_14: info->bpc = 14; break; case DRM_EDID_DIGITAL_DEPTH_16: info->bpc = 16; break; case DRM_EDID_DIGITAL_DEPTH_UNDEF: default: info->bpc = 0; break; } info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; } /** * drm_add_edid_modes - add modes from EDID data, if available * @connector: connector we're probing * @edid: edid data * * Add the specified modes to the connector's mode list. * * Return number of modes added or 0 if we couldn't find any. 
*/ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) { int num_modes = 0; u32 quirks; if (edid == NULL) { return 0; } if (!drm_edid_is_valid(edid)) { dev_warn(connector->dev->dev, "%s: EDID invalid.\n", drm_get_connector_name(connector)); return 0; } quirks = edid_get_quirks(edid); /* * EDID spec says modes should be preferred in this order: * - preferred detailed mode * - other detailed modes from base block * - detailed modes from extension blocks * - CVT 3-byte code modes * - standard timing codes * - established timing codes * - modes inferred from GTF or CVT range information * * We get this pretty much right. * * XXX order for additional mode types in extension blocks? */ num_modes += add_detailed_modes(connector, edid, quirks); num_modes += add_cvt_modes(connector, edid); num_modes += add_standard_modes(connector, edid); num_modes += add_established_modes(connector, edid); if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) num_modes += add_inferred_modes(connector, edid); num_modes += add_cea_modes(connector, edid); if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) edid_fixup_preferred(connector, quirks); drm_add_display_info(edid, &connector->display_info); return num_modes; } EXPORT_SYMBOL(drm_add_edid_modes); /** * drm_add_modes_noedid - add modes for the connectors without EDID * @connector: connector we're probing * @hdisplay: the horizontal display limit * @vdisplay: the vertical display limit * * Add the specified modes to the connector's mode list. Only when the * hdisplay/vdisplay is not beyond the given limit, it will be added. * * Return number of modes added or 0 if we couldn't find any. 
*/ int drm_add_modes_noedid(struct drm_connector *connector, int hdisplay, int vdisplay) { int i, count, num_modes = 0; struct drm_display_mode *mode; struct drm_device *dev = connector->dev; count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); if (hdisplay < 0) hdisplay = 0; if (vdisplay < 0) vdisplay = 0; for (i = 0; i < count; i++) { const struct drm_display_mode *ptr = &drm_dmt_modes[i]; if (hdisplay && vdisplay) { /* * Only when two are valid, they will be used to check * whether the mode should be added to the mode list of * the connector. */ if (ptr->hdisplay > hdisplay || ptr->vdisplay > vdisplay) continue; } if (drm_mode_vrefresh(ptr) > 61) continue; mode = drm_mode_duplicate(dev, ptr); if (mode) { drm_mode_probed_add(connector, mode); num_modes++; } } return num_modes; } EXPORT_SYMBOL(drm_add_modes_noedid); /** * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with * data from a DRM display mode * @frame: HDMI AVI infoframe * @mode: DRM display mode * * Returns 0 on success or a negative error code on failure. */ int drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, const struct drm_display_mode *mode) { int err; if (!frame || !mode) return -EINVAL; err = hdmi_avi_infoframe_init(frame); if (err < 0) return err; frame->video_code = drm_match_cea_mode(mode); if (!frame->video_code) return 0; frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; return 0; } EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
gpl-2.0
r1mikey/linux-picoxcell
drivers/net/wireless/rndis_wlan.c
224
102460
/* * Driver for RNDIS based wireless USB devices. * * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net> * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Portions of this file are based on NDISwrapper project, * Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani * http://ndiswrapper.sourceforge.net/ */ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/ieee80211.h> #include <linux/if_arp.h> #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <net/cfg80211.h> #include <linux/usb/usbnet.h> #include <linux/usb/rndis_host.h> /* NOTE: All these are settings for Broadcom chipset */ static char modparam_country[4] = "EU"; module_param_string(country, modparam_country, 4, 0444); MODULE_PARM_DESC(country, "Country code (ISO 3166-1 alpha-2), default: EU"); static int modparam_frameburst = 1; module_param_named(frameburst, modparam_frameburst, int, 0444); MODULE_PARM_DESC(frameburst, "enable frame bursting (default: on)"); static int modparam_afterburner = 0; module_param_named(afterburner, 
modparam_afterburner, int, 0444); MODULE_PARM_DESC(afterburner, "enable afterburner aka '125 High Speed Mode' (default: off)"); static int modparam_power_save = 0; module_param_named(power_save, modparam_power_save, int, 0444); MODULE_PARM_DESC(power_save, "set power save mode: 0=off, 1=on, 2=fast (default: off)"); static int modparam_power_output = 3; module_param_named(power_output, modparam_power_output, int, 0444); MODULE_PARM_DESC(power_output, "set power output: 0=25%, 1=50%, 2=75%, 3=100% (default: 100%)"); static int modparam_roamtrigger = -70; module_param_named(roamtrigger, modparam_roamtrigger, int, 0444); MODULE_PARM_DESC(roamtrigger, "set roaming dBm trigger: -80=optimize for distance, " "-60=bandwidth (default: -70)"); static int modparam_roamdelta = 1; module_param_named(roamdelta, modparam_roamdelta, int, 0444); MODULE_PARM_DESC(roamdelta, "set roaming tendency: 0=aggressive, 1=moderate, " "2=conservative (default: moderate)"); static int modparam_workaround_interval; module_param_named(workaround_interval, modparam_workaround_interval, int, 0444); MODULE_PARM_DESC(workaround_interval, "set stall workaround interval in msecs (0=disabled) (default: 0)"); /* Typical noise/maximum signal level values taken from ndiswrapper iw_ndis.h */ #define WL_NOISE -96 /* typical noise level in dBm */ #define WL_SIGMAX -32 /* typical maximum signal level in dBm */ /* Assume that Broadcom 4320 (only chipset at time of writing known to be * based on wireless rndis) has default txpower of 13dBm. * This value is from Linksys WUSB54GSC User Guide, Appendix F: Specifications. * 100% : 20 mW ~ 13dBm * 75% : 15 mW ~ 12dBm * 50% : 10 mW ~ 10dBm * 25% : 5 mW ~ 7dBm */ #define BCM4320_DEFAULT_TXPOWER_DBM_100 13 #define BCM4320_DEFAULT_TXPOWER_DBM_75 12 #define BCM4320_DEFAULT_TXPOWER_DBM_50 10 #define BCM4320_DEFAULT_TXPOWER_DBM_25 7 /* Known device types */ #define RNDIS_UNKNOWN 0 #define RNDIS_BCM4320A 1 #define RNDIS_BCM4320B 2 /* NDIS data structures. 
Taken from wpa_supplicant driver_ndis.c * slightly modified for datatype endianess, etc */ #define NDIS_802_11_LENGTH_SSID 32 #define NDIS_802_11_LENGTH_RATES 8 #define NDIS_802_11_LENGTH_RATES_EX 16 enum ndis_80211_net_type { NDIS_80211_TYPE_FREQ_HOP, NDIS_80211_TYPE_DIRECT_SEQ, NDIS_80211_TYPE_OFDM_A, NDIS_80211_TYPE_OFDM_G }; enum ndis_80211_net_infra { NDIS_80211_INFRA_ADHOC, NDIS_80211_INFRA_INFRA, NDIS_80211_INFRA_AUTO_UNKNOWN }; enum ndis_80211_auth_mode { NDIS_80211_AUTH_OPEN, NDIS_80211_AUTH_SHARED, NDIS_80211_AUTH_AUTO_SWITCH, NDIS_80211_AUTH_WPA, NDIS_80211_AUTH_WPA_PSK, NDIS_80211_AUTH_WPA_NONE, NDIS_80211_AUTH_WPA2, NDIS_80211_AUTH_WPA2_PSK }; enum ndis_80211_encr_status { NDIS_80211_ENCR_WEP_ENABLED, NDIS_80211_ENCR_DISABLED, NDIS_80211_ENCR_WEP_KEY_ABSENT, NDIS_80211_ENCR_NOT_SUPPORTED, NDIS_80211_ENCR_TKIP_ENABLED, NDIS_80211_ENCR_TKIP_KEY_ABSENT, NDIS_80211_ENCR_CCMP_ENABLED, NDIS_80211_ENCR_CCMP_KEY_ABSENT }; enum ndis_80211_priv_filter { NDIS_80211_PRIV_ACCEPT_ALL, NDIS_80211_PRIV_8021X_WEP }; enum ndis_80211_status_type { NDIS_80211_STATUSTYPE_AUTHENTICATION, NDIS_80211_STATUSTYPE_MEDIASTREAMMODE, NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST, NDIS_80211_STATUSTYPE_RADIOSTATE, }; enum ndis_80211_media_stream_mode { NDIS_80211_MEDIA_STREAM_OFF, NDIS_80211_MEDIA_STREAM_ON }; enum ndis_80211_radio_status { NDIS_80211_RADIO_STATUS_ON, NDIS_80211_RADIO_STATUS_HARDWARE_OFF, NDIS_80211_RADIO_STATUS_SOFTWARE_OFF, }; enum ndis_80211_addkey_bits { NDIS_80211_ADDKEY_8021X_AUTH = cpu_to_le32(1 << 28), NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ = cpu_to_le32(1 << 29), NDIS_80211_ADDKEY_PAIRWISE_KEY = cpu_to_le32(1 << 30), NDIS_80211_ADDKEY_TRANSMIT_KEY = cpu_to_le32(1 << 31) }; enum ndis_80211_addwep_bits { NDIS_80211_ADDWEP_PERCLIENT_KEY = cpu_to_le32(1 << 30), NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31) }; enum ndis_80211_power_mode { NDIS_80211_POWER_MODE_CAM, NDIS_80211_POWER_MODE_MAX_PSP, NDIS_80211_POWER_MODE_FAST_PSP, }; enum 
ndis_80211_pmkid_cand_list_flag_bits { NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0) }; struct ndis_80211_auth_request { __le32 length; u8 bssid[ETH_ALEN]; u8 padding[2]; __le32 flags; } __packed; struct ndis_80211_pmkid_candidate { u8 bssid[ETH_ALEN]; u8 padding[2]; __le32 flags; } __packed; struct ndis_80211_pmkid_cand_list { __le32 version; __le32 num_candidates; struct ndis_80211_pmkid_candidate candidate_list[0]; } __packed; struct ndis_80211_status_indication { __le32 status_type; union { __le32 media_stream_mode; __le32 radio_status; struct ndis_80211_auth_request auth_request[0]; struct ndis_80211_pmkid_cand_list cand_list; } u; } __packed; struct ndis_80211_ssid { __le32 length; u8 essid[NDIS_802_11_LENGTH_SSID]; } __packed; struct ndis_80211_conf_freq_hop { __le32 length; __le32 hop_pattern; __le32 hop_set; __le32 dwell_time; } __packed; struct ndis_80211_conf { __le32 length; __le32 beacon_period; __le32 atim_window; __le32 ds_config; struct ndis_80211_conf_freq_hop fh_config; } __packed; struct ndis_80211_bssid_ex { __le32 length; u8 mac[ETH_ALEN]; u8 padding[2]; struct ndis_80211_ssid ssid; __le32 privacy; __le32 rssi; __le32 net_type; struct ndis_80211_conf config; __le32 net_infra; u8 rates[NDIS_802_11_LENGTH_RATES_EX]; __le32 ie_length; u8 ies[0]; } __packed; struct ndis_80211_bssid_list_ex { __le32 num_items; struct ndis_80211_bssid_ex bssid[0]; } __packed; struct ndis_80211_fixed_ies { u8 timestamp[8]; __le16 beacon_interval; __le16 capabilities; } __packed; struct ndis_80211_wep_key { __le32 size; __le32 index; __le32 length; u8 material[32]; } __packed; struct ndis_80211_key { __le32 size; __le32 index; __le32 length; u8 bssid[ETH_ALEN]; u8 padding[6]; u8 rsc[8]; u8 material[32]; } __packed; struct ndis_80211_remove_key { __le32 size; __le32 index; u8 bssid[ETH_ALEN]; u8 padding[2]; } __packed; struct ndis_config_param { __le32 name_offs; __le32 name_length; __le32 type; __le32 value_offs; __le32 value_length; } __packed; struct 
ndis_80211_assoc_info { __le32 length; __le16 req_ies; struct req_ie { __le16 capa; __le16 listen_interval; u8 cur_ap_address[ETH_ALEN]; } req_ie; __le32 req_ie_length; __le32 offset_req_ies; __le16 resp_ies; struct resp_ie { __le16 capa; __le16 status_code; __le16 assoc_id; } resp_ie; __le32 resp_ie_length; __le32 offset_resp_ies; } __packed; struct ndis_80211_auth_encr_pair { __le32 auth_mode; __le32 encr_mode; } __packed; struct ndis_80211_capability { __le32 length; __le32 version; __le32 num_pmkids; __le32 num_auth_encr_pair; struct ndis_80211_auth_encr_pair auth_encr_pair[0]; } __packed; struct ndis_80211_bssid_info { u8 bssid[ETH_ALEN]; u8 pmkid[16]; } __packed; struct ndis_80211_pmkid { __le32 length; __le32 bssid_info_count; struct ndis_80211_bssid_info bssid_info[0]; } __packed; /* * private data */ #define CAP_MODE_80211A 1 #define CAP_MODE_80211B 2 #define CAP_MODE_80211G 4 #define CAP_MODE_MASK 7 #define WORK_LINK_UP 0 #define WORK_LINK_DOWN 1 #define WORK_SET_MULTICAST_LIST 2 #define RNDIS_WLAN_ALG_NONE 0 #define RNDIS_WLAN_ALG_WEP (1<<0) #define RNDIS_WLAN_ALG_TKIP (1<<1) #define RNDIS_WLAN_ALG_CCMP (1<<2) #define RNDIS_WLAN_NUM_KEYS 4 #define RNDIS_WLAN_KEY_MGMT_NONE 0 #define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0) #define RNDIS_WLAN_KEY_MGMT_PSK (1<<1) #define COMMAND_BUFFER_SIZE (CONTROL_BUFFER_SIZE + sizeof(struct rndis_set)) static const struct ieee80211_channel rndis_channels[] = { { .center_freq = 2412 }, { .center_freq = 2417 }, { .center_freq = 2422 }, { .center_freq = 2427 }, { .center_freq = 2432 }, { .center_freq = 2437 }, { .center_freq = 2442 }, { .center_freq = 2447 }, { .center_freq = 2452 }, { .center_freq = 2457 }, { .center_freq = 2462 }, { .center_freq = 2467 }, { .center_freq = 2472 }, { .center_freq = 2484 }, }; static const struct ieee80211_rate rndis_rates[] = { { .bitrate = 10 }, { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .flags = 
IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60 }, { .bitrate = 90 }, { .bitrate = 120 }, { .bitrate = 180 }, { .bitrate = 240 }, { .bitrate = 360 }, { .bitrate = 480 }, { .bitrate = 540 } }; static const u32 rndis_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; struct rndis_wlan_encr_key { int len; u32 cipher; u8 material[32]; u8 bssid[ETH_ALEN]; bool pairwise; bool tx_key; }; /* RNDIS device private data */ struct rndis_wlan_private { struct usbnet *usbdev; struct wireless_dev wdev; struct cfg80211_scan_request *scan_request; struct workqueue_struct *workqueue; struct delayed_work dev_poller_work; struct delayed_work scan_work; struct work_struct work; struct mutex command_lock; unsigned long work_pending; int last_qual; s32 cqm_rssi_thold; u32 cqm_rssi_hyst; int last_cqm_event_rssi; struct ieee80211_supported_band band; struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)]; struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)]; u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)]; int device_type; int caps; int multicast_size; /* module parameters */ char param_country[4]; int param_frameburst; int param_afterburner; int param_power_save; int param_power_output; int param_roamtrigger; int param_roamdelta; u32 param_workaround_interval; /* hardware state */ bool radio_on; int power_mode; int infra_mode; bool connected; u8 bssid[ETH_ALEN]; u32 current_command_oid; /* encryption stuff */ u8 encr_tx_key_index; struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS]; int wpa_version; u8 command_buffer[COMMAND_BUFFER_SIZE]; }; /* * cfg80211 ops */ static int rndis_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, u32 *flags, struct vif_params *params); static int rndis_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request); static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed); static int rndis_set_tx_power(struct wiphy 
*wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm); static int rndis_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int *dbm); static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme); static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code); static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params); static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev); static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params); static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr); static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool unicast, bool multicast); static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_info *sinfo); static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo); static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa); static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa); static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev); static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout); static int rndis_set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst); static const struct cfg80211_ops rndis_config_ops = { .change_virtual_intf = rndis_change_virtual_intf, .scan = rndis_scan, .set_wiphy_params = rndis_set_wiphy_params, .set_tx_power = rndis_set_tx_power, .get_tx_power = rndis_get_tx_power, .connect = rndis_connect, .disconnect = rndis_disconnect, 
	.join_ibss = rndis_join_ibss,
	.leave_ibss = rndis_leave_ibss,
	.add_key = rndis_add_key,
	.del_key = rndis_del_key,
	.set_default_key = rndis_set_default_key,
	.get_station = rndis_get_station,
	.dump_station = rndis_dump_station,
	.set_pmksa = rndis_set_pmksa,
	.del_pmksa = rndis_del_pmksa,
	.flush_pmksa = rndis_flush_pmksa,
	.set_power_mgmt = rndis_set_power_mgmt,
	.set_cqm_rssi_config = rndis_set_cqm_rssi_config,
};

/* Only the *address* of this variable is used, as a unique token to
 * identify wiphys created by this driver; the pointed-to value is never
 * dereferenced. */
static void *rndis_wiphy_privid = &rndis_wiphy_privid;

/* Return the driver-private state stashed in the usbnet device. */
static struct rndis_wlan_private *get_rndis_wlan_priv(struct usbnet *dev)
{
	return (struct rndis_wlan_private *)dev->driver_priv;
}

/* Map the power_output module parameter (0..3) to the BCM4320 TX power
 * in dBm; any out-of-range value falls back to 100% output. */
static u32 get_bcm4320_power_dbm(struct rndis_wlan_private *priv)
{
	switch (priv->param_power_output) {
	default:
	case 3:
		return BCM4320_DEFAULT_TXPOWER_DBM_100;
	case 2:
		return BCM4320_DEFAULT_TXPOWER_DBM_75;
	case 1:
		return BCM4320_DEFAULT_TXPOWER_DBM_50;
	case 0:
		return BCM4320_DEFAULT_TXPOWER_DBM_25;
	}
}

/* True if the cached key at @idx is a WPA-class (TKIP/CCMP) key rather
 * than WEP. */
static bool is_wpa_key(struct rndis_wlan_private *priv, u8 idx)
{
	int cipher = priv->encr_keys[idx].cipher;
	return (cipher == WLAN_CIPHER_SUITE_CCMP ||
		cipher == WLAN_CIPHER_SUITE_TKIP);
}

/* Translate a cfg80211 cipher suite into this driver's ALG bitmask;
 * unknown ciphers map to RNDIS_WLAN_ALG_NONE. */
static int rndis_cipher_to_alg(u32 cipher)
{
	switch (cipher) {
	default:
		return RNDIS_WLAN_ALG_NONE;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return RNDIS_WLAN_ALG_WEP;
	case WLAN_CIPHER_SUITE_TKIP:
		return RNDIS_WLAN_ALG_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return RNDIS_WLAN_ALG_CCMP;
	}
}

/* Translate a cfg80211 AKM suite into this driver's key-management
 * bitmask; unknown suites map to RNDIS_WLAN_KEY_MGMT_NONE. */
static int rndis_akm_suite_to_key_mgmt(u32 akm_suite)
{
	switch (akm_suite) {
	default:
		return RNDIS_WLAN_KEY_MGMT_NONE;
	case WLAN_AKM_SUITE_8021X:
		return RNDIS_WLAN_KEY_MGMT_802_1X;
	case WLAN_AKM_SUITE_PSK:
		return RNDIS_WLAN_KEY_MGMT_PSK;
	}
}

#ifdef DEBUG
/* Debug helper: map an RNDIS OID to its symbolic name for log output
 * (table continues past this chunk). */
static const char *oid_to_string(u32 oid)
{
	switch (oid) {
#define OID_STR(oid) case oid: return(#oid)
		/* from rndis_host.h */
		OID_STR(RNDIS_OID_802_3_PERMANENT_ADDRESS);
		OID_STR(RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE);
		OID_STR(RNDIS_OID_GEN_CURRENT_PACKET_FILTER);
		OID_STR(RNDIS_OID_GEN_PHYSICAL_MEDIUM);
		/* from rndis_wlan.c */
OID_STR(RNDIS_OID_GEN_LINK_SPEED); OID_STR(RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER); OID_STR(RNDIS_OID_GEN_XMIT_OK); OID_STR(RNDIS_OID_GEN_RCV_OK); OID_STR(RNDIS_OID_GEN_XMIT_ERROR); OID_STR(RNDIS_OID_GEN_RCV_ERROR); OID_STR(RNDIS_OID_GEN_RCV_NO_BUFFER); OID_STR(RNDIS_OID_802_3_CURRENT_ADDRESS); OID_STR(RNDIS_OID_802_3_MULTICAST_LIST); OID_STR(RNDIS_OID_802_3_MAXIMUM_LIST_SIZE); OID_STR(RNDIS_OID_802_11_BSSID); OID_STR(RNDIS_OID_802_11_SSID); OID_STR(RNDIS_OID_802_11_INFRASTRUCTURE_MODE); OID_STR(RNDIS_OID_802_11_ADD_WEP); OID_STR(RNDIS_OID_802_11_REMOVE_WEP); OID_STR(RNDIS_OID_802_11_DISASSOCIATE); OID_STR(RNDIS_OID_802_11_AUTHENTICATION_MODE); OID_STR(RNDIS_OID_802_11_PRIVACY_FILTER); OID_STR(RNDIS_OID_802_11_BSSID_LIST_SCAN); OID_STR(RNDIS_OID_802_11_ENCRYPTION_STATUS); OID_STR(RNDIS_OID_802_11_ADD_KEY); OID_STR(RNDIS_OID_802_11_REMOVE_KEY); OID_STR(RNDIS_OID_802_11_ASSOCIATION_INFORMATION); OID_STR(RNDIS_OID_802_11_CAPABILITY); OID_STR(RNDIS_OID_802_11_PMKID); OID_STR(RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED); OID_STR(RNDIS_OID_802_11_NETWORK_TYPE_IN_USE); OID_STR(RNDIS_OID_802_11_TX_POWER_LEVEL); OID_STR(RNDIS_OID_802_11_RSSI); OID_STR(RNDIS_OID_802_11_RSSI_TRIGGER); OID_STR(RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD); OID_STR(RNDIS_OID_802_11_RTS_THRESHOLD); OID_STR(RNDIS_OID_802_11_SUPPORTED_RATES); OID_STR(RNDIS_OID_802_11_CONFIGURATION); OID_STR(RNDIS_OID_802_11_POWER_MODE); OID_STR(RNDIS_OID_802_11_BSSID_LIST); #undef OID_STR } return "?"; } #else static const char *oid_to_string(u32 oid) { return "?"; } #endif /* translate error code */ static int rndis_error_status(__le32 rndis_status) { int ret = -EINVAL; switch (le32_to_cpu(rndis_status)) { case RNDIS_STATUS_SUCCESS: ret = 0; break; case RNDIS_STATUS_FAILURE: case RNDIS_STATUS_INVALID_DATA: ret = -EINVAL; break; case RNDIS_STATUS_NOT_SUPPORTED: ret = -EOPNOTSUPP; break; case RNDIS_STATUS_ADAPTER_NOT_READY: case RNDIS_STATUS_ADAPTER_NOT_OPEN: ret = -EBUSY; break; } return ret; } static int 
rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
{
	/*
	 * Send an RNDIS QUERY for @oid and copy up to *len bytes of the
	 * response into @data.  On success *len is updated to the full
	 * response length reported by the device (which may exceed the
	 * number of bytes actually copied).  Returns 0 or a negative errno.
	 */
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
	union {
		void *buf;
		struct rndis_msg_hdr *header;
		struct rndis_query *get;
		struct rndis_query_c *get_c;
	} u;
	int ret, buflen;
	int resplen, respoffs, copylen;

	/* Buffer must hold the query header plus the caller's payload;
	 * small requests reuse the preallocated per-device command buffer,
	 * larger ones get a temporary heap allocation. */
	buflen = *len + sizeof(*u.get);
	if (buflen < CONTROL_BUFFER_SIZE)
		buflen = CONTROL_BUFFER_SIZE;

	if (buflen > COMMAND_BUFFER_SIZE) {
		u.buf = kmalloc(buflen, GFP_KERNEL);
		if (!u.buf)
			return -ENOMEM;
	} else {
		u.buf = priv->command_buffer;
	}

	/* command_lock serializes all RNDIS control exchanges (and guards
	 * the shared command_buffer). */
	mutex_lock(&priv->command_lock);

	memset(u.get, 0, sizeof *u.get);
	u.get->msg_type = cpu_to_le32(RNDIS_MSG_QUERY);
	u.get->msg_len = cpu_to_le32(sizeof *u.get);
	u.get->oid = cpu_to_le32(oid);

	priv->current_command_oid = oid;
	ret = rndis_command(dev, u.header, buflen);
	priv->current_command_oid = 0;
	if (ret < 0)
		netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
			   __func__, oid_to_string(oid), ret,
			   le32_to_cpu(u.get_c->status));

	if (ret == 0) {
		resplen = le32_to_cpu(u.get_c->len);
		/* offset is relative to the RequestId field, 8 bytes into
		 * the message, hence the "+ 8". */
		respoffs = le32_to_cpu(u.get_c->offset) + 8;

		if (respoffs > buflen) {
			/* Device returned data offset outside buffer, error. */
			netdev_dbg(dev->net, "%s(%s): received invalid "
				"data offset: %d > %d\n", __func__,
				oid_to_string(oid), respoffs, buflen);
			ret = -EINVAL;
			goto exit_unlock;
		}

		if ((resplen + respoffs) > buflen) {
			/* Device would have returned more data if buffer would
			 * have been big enough. Copy just the bits that we got.
			 */
			copylen = buflen - respoffs;
		} else {
			copylen = resplen;
		}

		/* Never copy more than the caller's buffer can take. */
		if (copylen > *len)
			copylen = *len;

		memcpy(data, u.buf + respoffs, copylen);

		*len = resplen;

		ret = rndis_error_status(u.get_c->status);
		if (ret < 0)
			netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
				   __func__, oid_to_string(oid),
				   le32_to_cpu(u.get_c->status), ret);
	}

exit_unlock:
	mutex_unlock(&priv->command_lock);

	if (u.buf != priv->command_buffer)
		kfree(u.buf);
	return ret;
}

/*
 * Send an RNDIS SET for @oid with @len bytes of payload from @data.
 * Returns 0 or a negative errno (device-reported status is translated
 * by rndis_error_status()).
 */
static int rndis_set_oid(struct usbnet *dev, u32 oid, const void *data,
			 int len)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
	union {
		void *buf;
		struct rndis_msg_hdr *header;
		struct rndis_set *set;
		struct rndis_set_c *set_c;
	} u;
	int ret, buflen;

	/* Same buffer strategy as rndis_query_oid(): reuse the shared
	 * command buffer when the request fits, kmalloc otherwise. */
	buflen = len + sizeof(*u.set);
	if (buflen < CONTROL_BUFFER_SIZE)
		buflen = CONTROL_BUFFER_SIZE;

	if (buflen > COMMAND_BUFFER_SIZE) {
		u.buf = kmalloc(buflen, GFP_KERNEL);
		if (!u.buf)
			return -ENOMEM;
	} else {
		u.buf = priv->command_buffer;
	}

	mutex_lock(&priv->command_lock);

	memset(u.set, 0, sizeof *u.set);
	u.set->msg_type = cpu_to_le32(RNDIS_MSG_SET);
	u.set->msg_len = cpu_to_le32(sizeof(*u.set) + len);
	u.set->oid = cpu_to_le32(oid);
	u.set->len = cpu_to_le32(len);
	/* Payload offset is relative to the RequestId field (8 bytes in). */
	u.set->offset = cpu_to_le32(sizeof(*u.set) - 8);
	u.set->handle = cpu_to_le32(0);
	memcpy(u.buf + sizeof(*u.set), data, len);

	priv->current_command_oid = oid;
	ret = rndis_command(dev, u.header, buflen);
	priv->current_command_oid = 0;
	if (ret < 0)
		netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
			   __func__, oid_to_string(oid), ret,
			   le32_to_cpu(u.set_c->status));

	if (ret == 0) {
		ret = rndis_error_status(u.set_c->status);

		if (ret < 0)
			netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
				   __func__, oid_to_string(oid),
				   le32_to_cpu(u.set_c->status), ret);
	}

	mutex_unlock(&priv->command_lock);

	if (u.buf != priv->command_buffer)
		kfree(u.buf);
	return ret;
}

/* Issue an RNDIS RESET message to the device (body continues past this
 * chunk). */
static int rndis_reset(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct
rndis_reset *reset; int ret; mutex_lock(&priv->command_lock); reset = (void *)priv->command_buffer; memset(reset, 0, sizeof(*reset)); reset->msg_type = cpu_to_le32(RNDIS_MSG_RESET); reset->msg_len = cpu_to_le32(sizeof(*reset)); priv->current_command_oid = 0; ret = rndis_command(usbdev, (void *)reset, CONTROL_BUFFER_SIZE); mutex_unlock(&priv->command_lock); if (ret < 0) return ret; return 0; } /* * Specs say that we can only set config parameters only soon after device * initialization. * value_type: 0 = u32, 2 = unicode string */ static int rndis_set_config_parameter(struct usbnet *dev, char *param, int value_type, void *value) { struct ndis_config_param *infobuf; int value_len, info_len, param_len, ret, i; __le16 *unibuf; __le32 *dst_value; if (value_type == 0) value_len = sizeof(__le32); else if (value_type == 2) value_len = strlen(value) * sizeof(__le16); else return -EINVAL; param_len = strlen(param) * sizeof(__le16); info_len = sizeof(*infobuf) + param_len + value_len; #ifdef DEBUG info_len += 12; #endif infobuf = kmalloc(info_len, GFP_KERNEL); if (!infobuf) return -ENOMEM; #ifdef DEBUG info_len -= 12; /* extra 12 bytes are for padding (debug output) */ memset(infobuf, 0xCC, info_len + 12); #endif if (value_type == 2) netdev_dbg(dev->net, "setting config parameter: %s, value: %s\n", param, (u8 *)value); else netdev_dbg(dev->net, "setting config parameter: %s, value: %d\n", param, *(u32 *)value); infobuf->name_offs = cpu_to_le32(sizeof(*infobuf)); infobuf->name_length = cpu_to_le32(param_len); infobuf->type = cpu_to_le32(value_type); infobuf->value_offs = cpu_to_le32(sizeof(*infobuf) + param_len); infobuf->value_length = cpu_to_le32(value_len); /* simple string to unicode string conversion */ unibuf = (void *)infobuf + sizeof(*infobuf); for (i = 0; i < param_len / sizeof(__le16); i++) unibuf[i] = cpu_to_le16(param[i]); if (value_type == 2) { unibuf = (void *)infobuf + sizeof(*infobuf) + param_len; for (i = 0; i < value_len / sizeof(__le16); i++) unibuf[i] = 
cpu_to_le16(((u8 *)value)[i]);
	} else {
		/* u32 value: stored little-endian right after the name. */
		dst_value = (void *)infobuf + sizeof(*infobuf) + param_len;
		*dst_value = cpu_to_le32(*(u32 *)value);
	}

#ifdef DEBUG
	/* Hex-dump the assembled parameter blob, 12 bytes per output line
	 * (the extra 12 bytes of 0xCC padding allocated above keep the
	 * final partial line readable). */
	netdev_dbg(dev->net, "info buffer (len: %d)\n", info_len);
	for (i = 0; i < info_len; i += 12) {
		u32 *tmp = (u32 *)((u8 *)infobuf + i);
		netdev_dbg(dev->net, "%08X:%08X:%08X\n",
			   cpu_to_be32(tmp[0]),
			   cpu_to_be32(tmp[1]),
			   cpu_to_be32(tmp[2]));
	}
#endif

	ret = rndis_set_oid(dev, RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER,
			    infobuf, info_len);
	if (ret != 0)
		netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n",
			   ret);

	kfree(infobuf);
	return ret;
}

/* Convenience wrapper: set a unicode-string (value_type 2) config
 * parameter. */
static int rndis_set_config_parameter_str(struct usbnet *dev,
					  char *param, char *value)
{
	return rndis_set_config_parameter(dev, param, 2, value);
}

/*
 * data conversion functions
 */

/* Map a signal level in dBm onto a 0..100 link-quality scale, linear
 * between WL_NOISE and WL_SIGMAX and clamped at both ends. */
static int level_to_qual(int level)
{
	int qual = 100 * (level - WL_NOISE) / (WL_SIGMAX - WL_NOISE);
	return qual >= 0 ? (qual <= 100 ? qual : 100) : 0;
}

/*
 * common functions
 */
static int set_infra_mode(struct usbnet *usbdev, int mode);
static void restore_keys(struct usbnet *usbdev);
static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
				  bool *matched);

/* Kick off a hardware BSSID scan. */
static int rndis_start_bssid_list_scan(struct usbnet *usbdev)
{
	__le32 tmp;

	/* Note: RNDIS_OID_802_11_BSSID_LIST_SCAN clears internal BSS list.
*/
	tmp = cpu_to_le32(1);
	return rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST_SCAN,
			     &tmp, sizeof(tmp));
}

/* Program the SSID; a successful set also marks the radio as on. */
static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	int ret;

	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_SSID,
			    ssid, sizeof(*ssid));
	if (ret < 0) {
		netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret);
		return ret;
	}
	if (ret == 0) {
		priv->radio_on = true;
		netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__);
	}

	return ret;
}

/* Program the BSSID the device should associate with. */
static int set_bssid(struct usbnet *usbdev, const u8 *bssid)
{
	int ret;

	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_BSSID,
			    bssid, ETH_ALEN);
	if (ret < 0) {
		netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n",
			    bssid, ret);
		return ret;
	}

	return ret;
}

/* Setting the broadcast address as BSSID removes any BSSID lock. */
static int clear_bssid(struct usbnet *usbdev)
{
	static const u8 broadcast_mac[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	return set_bssid(usbdev, broadcast_mac);
}

/* Query the current BSSID; on failure @bssid is zeroed. */
static int get_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
{
	int ret, len;

	len = ETH_ALEN;
	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID,
			      bssid, &len);

	if (ret != 0)
		eth_zero_addr(bssid);

	return ret;
}

/* Fetch association information into @info (at most @len bytes). */
static int get_association_info(struct usbnet *usbdev,
				struct ndis_80211_assoc_info *info, int len)
{
	return rndis_query_oid(usbdev,
			RNDIS_OID_802_11_ASSOCIATION_INFORMATION,
			info, &len);
}

/* Associated == radio on and a non-zero BSSID reported by the device. */
static bool is_associated(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	u8 bssid[ETH_ALEN];
	int ret;

	if (!priv->radio_on)
		return false;

	ret = get_bssid(usbdev, bssid);

	return (ret == 0 && !is_zero_ether_addr(bssid));
}

/* Drop the current association; if @reset_ssid, a random SSID is later
 * programmed to re-enable the radio (continues past this chunk). */
static int disassociate(struct usbnet *usbdev, bool reset_ssid)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct ndis_80211_ssid ssid;
	int i, ret = 0;

	if (priv->radio_on) {
		ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_DISASSOCIATE,
				    NULL, 0);
		if (ret == 0) {
			priv->radio_on = false;
			netdev_dbg(usbdev->net, "%s(): radio_on = false\n",
__func__); if (reset_ssid) msleep(100); } } /* disassociate causes radio to be turned off; if reset_ssid * is given, set random ssid to enable radio */ if (reset_ssid) { /* Set device to infrastructure mode so we don't get ad-hoc * 'media connect' indications with the random ssid. */ set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA); ssid.length = cpu_to_le32(sizeof(ssid.essid)); get_random_bytes(&ssid.essid[2], sizeof(ssid.essid)-2); ssid.essid[0] = 0x1; ssid.essid[1] = 0xff; for (i = 2; i < sizeof(ssid.essid); i++) ssid.essid[i] = 0x1 + (ssid.essid[i] * 0xfe / 0xff); ret = set_essid(usbdev, &ssid); } return ret; } static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version, enum nl80211_auth_type auth_type, int keymgmt) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); __le32 tmp; int auth_mode, ret; netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x authalg=0x%x keymgmt=0x%x\n", __func__, wpa_version, auth_type, keymgmt); if (wpa_version & NL80211_WPA_VERSION_2) { if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X) auth_mode = NDIS_80211_AUTH_WPA2; else auth_mode = NDIS_80211_AUTH_WPA2_PSK; } else if (wpa_version & NL80211_WPA_VERSION_1) { if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X) auth_mode = NDIS_80211_AUTH_WPA; else if (keymgmt & RNDIS_WLAN_KEY_MGMT_PSK) auth_mode = NDIS_80211_AUTH_WPA_PSK; else auth_mode = NDIS_80211_AUTH_WPA_NONE; } else if (auth_type == NL80211_AUTHTYPE_SHARED_KEY) auth_mode = NDIS_80211_AUTH_SHARED; else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) auth_mode = NDIS_80211_AUTH_OPEN; else if (auth_type == NL80211_AUTHTYPE_AUTOMATIC) auth_mode = NDIS_80211_AUTH_AUTO_SWITCH; else return -ENOTSUPP; tmp = cpu_to_le32(auth_mode); ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_AUTHENTICATION_MODE, &tmp, sizeof(tmp)); if (ret != 0) { netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n", ret); return ret; } priv->wpa_version = wpa_version; return 0; } static int set_priv_filter(struct usbnet *usbdev) { struct rndis_wlan_private *priv = 
get_rndis_wlan_priv(usbdev); __le32 tmp; netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x\n", __func__, priv->wpa_version); if (priv->wpa_version & NL80211_WPA_VERSION_2 || priv->wpa_version & NL80211_WPA_VERSION_1) tmp = cpu_to_le32(NDIS_80211_PRIV_8021X_WEP); else tmp = cpu_to_le32(NDIS_80211_PRIV_ACCEPT_ALL); return rndis_set_oid(usbdev, RNDIS_OID_802_11_PRIVACY_FILTER, &tmp, sizeof(tmp)); } static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) { __le32 tmp; int encr_mode, ret; netdev_dbg(usbdev->net, "%s(): cipher_pair=0x%x cipher_group=0x%x\n", __func__, pairwise, groupwise); if (pairwise & RNDIS_WLAN_ALG_CCMP) encr_mode = NDIS_80211_ENCR_CCMP_ENABLED; else if (pairwise & RNDIS_WLAN_ALG_TKIP) encr_mode = NDIS_80211_ENCR_TKIP_ENABLED; else if (pairwise & RNDIS_WLAN_ALG_WEP) encr_mode = NDIS_80211_ENCR_WEP_ENABLED; else if (groupwise & RNDIS_WLAN_ALG_CCMP) encr_mode = NDIS_80211_ENCR_CCMP_ENABLED; else if (groupwise & RNDIS_WLAN_ALG_TKIP) encr_mode = NDIS_80211_ENCR_TKIP_ENABLED; else encr_mode = NDIS_80211_ENCR_DISABLED; tmp = cpu_to_le32(encr_mode); ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_ENCRYPTION_STATUS, &tmp, sizeof(tmp)); if (ret != 0) { netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n", ret); return ret; } return 0; } static int set_infra_mode(struct usbnet *usbdev, int mode) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); __le32 tmp; int ret; netdev_dbg(usbdev->net, "%s(): infra_mode=0x%x\n", __func__, priv->infra_mode); tmp = cpu_to_le32(mode); ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_INFRASTRUCTURE_MODE, &tmp, sizeof(tmp)); if (ret != 0) { netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n", ret); return ret; } /* NDIS drivers clear keys when infrastructure mode is * changed. But Linux tools assume otherwise. 
So set the
	 * keys */
	restore_keys(usbdev);

	priv->infra_mode = mode;
	return 0;
}

/* Clamp and program the RTS threshold (out-of-range values become 2347,
 * the 802.11 maximum). */
static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
{
	__le32 tmp;

	netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);

	/* Note: rts_threshold is unsigned; "== -1" matches the all-ones
	 * wildcard value cfg80211 passes for "disabled". */
	if (rts_threshold == -1 || rts_threshold > 2347)
		rts_threshold = 2347;

	tmp = cpu_to_le32(rts_threshold);
	return rndis_set_oid(usbdev, RNDIS_OID_802_11_RTS_THRESHOLD,
			     &tmp, sizeof(tmp));
}

/* Clamp and program the fragmentation threshold (valid range 256..2346). */
static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
{
	__le32 tmp;

	netdev_dbg(usbdev->net, "%s(): %i\n", __func__, frag_threshold);

	if (frag_threshold < 256 || frag_threshold > 2346)
		frag_threshold = 2346;

	tmp = cpu_to_le32(frag_threshold);
	return rndis_set_oid(usbdev, RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD,
			     &tmp, sizeof(tmp));
}

/* Reset 802.11 state to driver defaults: infrastructure mode, open
 * authentication, no privacy filter, no encryption. */
static void set_default_iw_params(struct usbnet *usbdev)
{
	set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
	set_auth_mode(usbdev, 0, NL80211_AUTHTYPE_OPEN_SYSTEM,
			RNDIS_WLAN_KEY_MGMT_NONE);
	set_priv_filter(usbdev);
	set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE, RNDIS_WLAN_ALG_NONE);
}

/* Full deauth: disassociate (with SSID reset) and restore defaults. */
static int deauthenticate(struct usbnet *usbdev)
{
	int ret;

	ret = disassociate(usbdev, true);
	set_default_iw_params(usbdev);
	return ret;
}

/* Tune the radio to @channel (2.4 GHz band). */
static int set_channel(struct usbnet *usbdev, int channel)
{
	struct ndis_80211_conf config;
	unsigned int dsconfig;
	int len, ret;

	netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel);

	/* this OID is valid only when not associated */
	if (is_associated(usbdev))
		return 0;

	/* ds_config is expressed in kHz, hence the * 1000. */
	dsconfig = 1000 *
		ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);

	/* Read-modify-write: fetch the current configuration so only the
	 * channel (ds_config) field is changed. */
	len = sizeof(config);
	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_CONFIGURATION,
			      &config, &len);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "%s(): querying configuration failed\n",
			   __func__);
		return ret;
	}

	config.ds_config = cpu_to_le32(dsconfig);
	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_CONFIGURATION,
			    &config, sizeof(config));

	netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret);

	return ret;
}

/* Return the channel the device is currently on, optionally reporting
 * the beacon period (continues past this chunk). */
static struct ieee80211_channel
*get_current_channel(struct usbnet *usbdev, u32 *beacon_period) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ieee80211_channel *channel; struct ndis_80211_conf config; int len, ret; /* Get channel and beacon interval */ len = sizeof(config); ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_CONFIGURATION, &config, &len); netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_CONFIGURATION -> %d\n", __func__, ret); if (ret < 0) return NULL; channel = ieee80211_get_channel(priv->wdev.wiphy, KHZ_TO_MHZ(le32_to_cpu(config.ds_config))); if (!channel) return NULL; if (beacon_period) *beacon_period = le32_to_cpu(config.beacon_period); return channel; } /* index must be 0 - N, as per NDIS */ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len, u8 index) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ndis_80211_wep_key ndis_key; u32 cipher; int ret; netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n", __func__, index, key_len); if (index >= RNDIS_WLAN_NUM_KEYS) return -EINVAL; if (key_len == 5) cipher = WLAN_CIPHER_SUITE_WEP40; else if (key_len == 13) cipher = WLAN_CIPHER_SUITE_WEP104; else return -EINVAL; memset(&ndis_key, 0, sizeof(ndis_key)); ndis_key.size = cpu_to_le32(sizeof(ndis_key)); ndis_key.length = cpu_to_le32(key_len); ndis_key.index = cpu_to_le32(index); memcpy(&ndis_key.material, key, key_len); if (index == priv->encr_tx_key_index) { ndis_key.index |= NDIS_80211_ADDWEP_TRANSMIT_KEY; ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP, RNDIS_WLAN_ALG_NONE); if (ret) netdev_warn(usbdev->net, "encryption couldn't be enabled (%08X)\n", ret); } ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_ADD_WEP, &ndis_key, sizeof(ndis_key)); if (ret != 0) { netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n", index + 1, ret); return ret; } priv->encr_keys[index].len = key_len; priv->encr_keys[index].cipher = cipher; memcpy(&priv->encr_keys[index].material, key, key_len); 
	eth_broadcast_addr(priv->encr_keys[index].bssid);

	return 0;
}

/*
 * Install a WPA (TKIP/CCMP) key via the NDIS ADD_KEY OID.
 * @flags carries the NDIS_80211_ADDKEY_* bits: transmit key, pairwise
 * key, and whether an initial receive sequence counter (@rx_seq) is
 * supplied.  A copy of the key is cached in priv->encr_keys[].
 */
static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
			u8 index, const u8 *addr, const u8 *rx_seq,
			int seq_len, u32 cipher, __le32 flags)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct ndis_80211_key ndis_key;
	bool is_addr_ok;
	int ret;

	if (index >= RNDIS_WLAN_NUM_KEYS) {
		netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n",
			   __func__, index);
		return -EINVAL;
	}
	if (key_len > sizeof(ndis_key.material) || key_len < 0) {
		netdev_dbg(usbdev->net, "%s(): key length out of range (%i)\n",
			   __func__, key_len);
		return -EINVAL;
	}
	if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) {
		/* the RSC flag requires a valid, bounded rx_seq buffer */
		if (!rx_seq || seq_len <= 0) {
			netdev_dbg(usbdev->net,
				   "%s(): recv seq flag without buffer\n",
				   __func__);
			return -EINVAL;
		}
		if (rx_seq && seq_len > sizeof(ndis_key.rsc)) {
			netdev_dbg(usbdev->net,
				   "%s(): too big recv seq buffer\n",
				   __func__);
			return -EINVAL;
		}
	}

	/* a pairwise key needs a real (unicast, non-zero) peer address */
	is_addr_ok = addr && !is_zero_ether_addr(addr) &&
					!is_broadcast_ether_addr(addr);
	if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) {
		netdev_dbg(usbdev->net,
			   "%s(): pairwise but bssid invalid (%pM)\n",
			   __func__, addr);
		return -EINVAL;
	}

	netdev_dbg(usbdev->net, "%s(%i): flags:%i%i%i\n", __func__, index,
		   !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
		   !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
		   !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));

	memset(&ndis_key, 0, sizeof(ndis_key));

	/* size excludes the unused tail of the fixed-size material array */
	ndis_key.size = cpu_to_le32(sizeof(ndis_key) -
				sizeof(ndis_key.material) + key_len);
	ndis_key.length = cpu_to_le32(key_len);
	ndis_key.index = cpu_to_le32(index) | flags;

	if (cipher == WLAN_CIPHER_SUITE_TKIP && key_len == 32) {
		/* wpa_supplicant gives us the Michael MIC RX/TX keys in
		 * different order than NDIS spec, so swap the order here.
		 */
		memcpy(ndis_key.material, key, 16);
		memcpy(ndis_key.material + 16, key + 24, 8);
		memcpy(ndis_key.material + 24, key + 16, 8);
	} else
		memcpy(ndis_key.material, key, key_len);

	if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ)
		memcpy(ndis_key.rsc, rx_seq, seq_len);

	if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) {
		/* pairwise key */
		memcpy(ndis_key.bssid, addr, ETH_ALEN);
	} else {
		/* group key */
		if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
			eth_broadcast_addr(ndis_key.bssid);
		else
			get_bssid(usbdev, ndis_key.bssid);
	}

	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_ADD_KEY, &ndis_key,
			    le32_to_cpu(ndis_key.size));
	netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_ADD_KEY -> %08X\n",
		   __func__, ret);
	if (ret != 0)
		return ret;

	/* cache the key locally (in wpa_supplicant byte order) */
	memset(&priv->encr_keys[index], 0, sizeof(priv->encr_keys[index]));
	priv->encr_keys[index].len = key_len;
	priv->encr_keys[index].cipher = cipher;
	memcpy(&priv->encr_keys[index].material, key, key_len);
	if (flags & NDIS_80211_ADDKEY_PAIRWISE_KEY)
		memcpy(&priv->encr_keys[index].bssid, ndis_key.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(priv->encr_keys[index].bssid);

	if (flags & NDIS_80211_ADDKEY_TRANSMIT_KEY)
		priv->encr_tx_key_index = index;

	return 0;
}

/*
 * Reprogram a single cached WEP key on the device.  WPA keys and empty
 * slots are skipped (success).
 */
static int restore_key(struct usbnet *usbdev, u8 key_idx)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct rndis_wlan_encr_key key;

	if (is_wpa_key(priv, key_idx))
		return 0;

	key = priv->encr_keys[key_idx];

	netdev_dbg(usbdev->net, "%s(): %i:%i\n", __func__, key_idx, key.len);

	if (key.len == 0)
		return 0;

	return add_wep_key(usbdev, key.material, key.len, key_idx);
}

/* Reprogram all four cached WEP key slots on the device. */
static void restore_keys(struct usbnet *usbdev)
{
	int i;

	for (i = 0; i < 4; i++)
		restore_key(usbdev, i);
}

/* Drop the cached copy of key slot @idx. */
static void clear_key(struct rndis_wlan_private *priv, u8 idx)
{
	memset(&priv->encr_keys[idx], 0, sizeof(priv->encr_keys[idx]));
}

/* remove_key is for both wep and wpa */
static int remove_key(struct usbnet *usbdev, u8 index, const u8 *bssid)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct
	       ndis_80211_remove_key remove_key;
	__le32 keyindex;
	bool is_wpa;
	int ret;

	if (index >= RNDIS_WLAN_NUM_KEYS)
		return -ENOENT;

	/* nothing cached in this slot -> nothing to remove */
	if (priv->encr_keys[index].len == 0)
		return 0;

	is_wpa = is_wpa_key(priv, index);

	netdev_dbg(usbdev->net, "%s(): %i:%s:%i\n", __func__, index,
		   is_wpa ? "wpa" : "wep", priv->encr_keys[index].len);

	clear_key(priv, index);

	if (is_wpa) {
		/* WPA keys go through the REMOVE_KEY OID */
		remove_key.size = cpu_to_le32(sizeof(remove_key));
		remove_key.index = cpu_to_le32(index);
		if (bssid) {
			/* pairwise key */
			if (!is_broadcast_ether_addr(bssid))
				remove_key.index |=
					NDIS_80211_ADDKEY_PAIRWISE_KEY;
			memcpy(remove_key.bssid, bssid,
					sizeof(remove_key.bssid));
		} else
			memset(remove_key.bssid, 0xff,
						sizeof(remove_key.bssid));

		ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_REMOVE_KEY,
				    &remove_key, sizeof(remove_key));
		if (ret != 0)
			return ret;
	} else {
		/* WEP keys are removed by index via REMOVE_WEP */
		keyindex = cpu_to_le32(index);
		ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_REMOVE_WEP,
				    &keyindex, sizeof(keyindex));
		if (ret != 0) {
			netdev_warn(usbdev->net,
				    "removing encryption key %d failed (%08X)\n",
				    index, ret);
			return ret;
		}
	}

	/* if it is transmit key, disable encryption */
	if (index == priv->encr_tx_key_index)
		set_encr_mode(usbdev, RNDIS_WLAN_ALG_NONE,
			      RNDIS_WLAN_ALG_NONE);

	return 0;
}

/*
 * Build and program the RNDIS packet filter from the netdev flags and
 * multicast list (ndo_set_rx_mode path).  Falls back to the all-
 * multicast filter bit when the list exceeds the device limit or the
 * MULTICAST_LIST OID fails.
 */
static void set_multicast_list(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct netdev_hw_addr *ha;
	__le32 filter, basefilter;
	int ret;
	char *mc_addrs = NULL;
	int mc_count;

	basefilter = filter = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED |
					  RNDIS_PACKET_TYPE_BROADCAST);

	if (usbdev->net->flags & IFF_PROMISC) {
		filter |= cpu_to_le32(RNDIS_PACKET_TYPE_PROMISCUOUS |
				      RNDIS_PACKET_TYPE_ALL_LOCAL);
	} else if (usbdev->net->flags & IFF_ALLMULTI) {
		filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
	}

	/* promisc/allmulti already decided the filter; skip the mc list */
	if (filter != basefilter)
		goto set_filter;

	/*
	 * mc_list should be accessed holding the lock, so copy addresses to
	 * local buffer first.
	 */
	netif_addr_lock_bh(usbdev->net);
	mc_count = netdev_mc_count(usbdev->net);
	if (mc_count > priv->multicast_size) {
		filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);
	} else if (mc_count) {
		int i = 0;

		mc_addrs = kmalloc_array(mc_count, ETH_ALEN, GFP_ATOMIC);
		if (!mc_addrs) {
			netif_addr_unlock_bh(usbdev->net);
			return;
		}

		netdev_for_each_mc_addr(ha, usbdev->net)
			memcpy(mc_addrs + i++ * ETH_ALEN,
			       ha->addr, ETH_ALEN);
	}
	netif_addr_unlock_bh(usbdev->net);

	/* list overflowed -> all-multicast was set above; no OID needed */
	if (filter != basefilter)
		goto set_filter;

	if (mc_count) {
		ret = rndis_set_oid(usbdev, RNDIS_OID_802_3_MULTICAST_LIST,
				    mc_addrs, mc_count * ETH_ALEN);
		kfree(mc_addrs);
		if (ret == 0)
			filter |= cpu_to_le32(RNDIS_PACKET_TYPE_MULTICAST);
		else
			filter |= cpu_to_le32(RNDIS_PACKET_TYPE_ALL_MULTICAST);

		netdev_dbg(usbdev->net, "RNDIS_OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
			   mc_count, priv->multicast_size, ret);
	}

set_filter:
	ret = rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
			    &filter, sizeof(filter));
	if (ret < 0) {
		netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n",
			    le32_to_cpu(filter));
	}

	netdev_dbg(usbdev->net, "RNDIS_OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
		   le32_to_cpu(filter), ret);
}

#ifdef DEBUG
/* Dump the given PMKID list to the debug log (DEBUG builds only). */
static void debug_print_pmkids(struct usbnet *usbdev,
			       struct ndis_80211_pmkid *pmkids,
			       const char *func_str)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	int i, len, count, max_pmkids, entry_len;

	max_pmkids = priv->wdev.wiphy->max_num_pmkids;
	len = le32_to_cpu(pmkids->length);
	count = le32_to_cpu(pmkids->bssid_info_count);

	entry_len = (count > 0) ?
			(len - sizeof(*pmkids)) / count : -1;

	netdev_dbg(usbdev->net, "%s(): %d PMKIDs (data len: %d, entry len: "
		   "%d)\n", func_str, count, len, entry_len);

	if (count > max_pmkids)
		count = max_pmkids;

	for (i = 0; i < count; i++) {
		u32 *tmp = (u32 *)pmkids->bssid_info[i].pmkid;

		netdev_dbg(usbdev->net, "%s(): bssid: %pM, "
			   "pmkid: %08X:%08X:%08X:%08X\n",
			   func_str, pmkids->bssid_info[i].bssid,
			   cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
			   cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
	}
}
#else
/* No-op stub for non-DEBUG builds. */
static void debug_print_pmkids(struct usbnet *usbdev,
			       struct ndis_80211_pmkid *pmkids,
			       const char *func_str)
{
	return;
}
#endif

/*
 * Read the device's PMKID cache.  Returns a freshly allocated buffer
 * that the caller owns (and must kfree), or an ERR_PTR on failure.
 */
static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct ndis_80211_pmkid *pmkids;
	int len, ret, max_pmkids;

	max_pmkids = priv->wdev.wiphy->max_num_pmkids;
	len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);

	pmkids = kzalloc(len, GFP_KERNEL);
	if (!pmkids)
		return ERR_PTR(-ENOMEM);

	pmkids->length = cpu_to_le32(len);
	pmkids->bssid_info_count = cpu_to_le32(max_pmkids);

	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_PMKID, pmkids, &len);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d)"
			   " -> %d\n", __func__, len, max_pmkids, ret);

		kfree(pmkids);
		return ERR_PTR(ret);
	}

	/* don't trust the device to honour the wiphy limit */
	if (le32_to_cpu(pmkids->bssid_info_count) > max_pmkids)
		pmkids->bssid_info_count = cpu_to_le32(max_pmkids);

	debug_print_pmkids(usbdev, pmkids, __func__);

	return pmkids;
}

/*
 * Write @pmkids to the device's PMKID cache.  Always consumes (frees)
 * @pmkids, whether the OID set succeeds or not.
 */
static int set_device_pmkids(struct usbnet *usbdev,
			     struct ndis_80211_pmkid *pmkids)
{
	int ret, len, num_pmkids;

	num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
	len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
	pmkids->length = cpu_to_le32(len);

	debug_print_pmkids(usbdev, pmkids, __func__);

	ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID, pmkids,
			    le32_to_cpu(pmkids->length));
	if (ret < 0) {
		netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_PMKID(%d, %d) -> %d"
			   "\n", __func__, len, num_pmkids, ret);
	}

	kfree(pmkids);
	return ret;
}

/*
 * Remove the entry matching pmksa->bssid from @pmkids by shifting the
 * tail down.  Returns the shrunk list, or frees @pmkids and returns
 * ERR_PTR(-ENOENT) when the bssid is not in the list.
 */
static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
					struct ndis_80211_pmkid *pmkids,
					struct cfg80211_pmksa *pmksa,
					int max_pmkids)
{
	int i, newlen, err;
	unsigned int count;

	count = le32_to_cpu(pmkids->bssid_info_count);

	if (count > max_pmkids)
		count = max_pmkids;

	for (i = 0; i < count; i++)
		if (ether_addr_equal(pmkids->bssid_info[i].bssid,
				     pmksa->bssid))
			break;

	/* pmkid not found */
	if (i == count) {
		netdev_dbg(usbdev->net, "%s(): bssid not found (%pM)\n",
			   __func__, pmksa->bssid);
		err = -ENOENT;
		goto error;
	}

	for (; i + 1 < count; i++)
		pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];

	count--;
	newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);

	pmkids->length = cpu_to_le32(newlen);
	pmkids->bssid_info_count = cpu_to_le32(count);

	return pmkids;
error:
	kfree(pmkids);
	return ERR_PTR(err);
}

/*
 * Update the PMKID for pmksa->bssid in @pmkids, or append a new entry.
 * May krealloc, so the returned pointer can differ from @pmkids; on
 * error @pmkids is freed and an ERR_PTR is returned.
 */
static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
					struct ndis_80211_pmkid *pmkids,
					struct cfg80211_pmksa *pmksa,
					int max_pmkids)
{
	struct ndis_80211_pmkid *new_pmkids;
	int i, err, newlen;
	unsigned int count;

	count = le32_to_cpu(pmkids->bssid_info_count);

	if (count > max_pmkids)
		count = max_pmkids;

	/* update with new pmkid */
	for (i = 0; i < count; i++) {
		if (!ether_addr_equal(pmkids->bssid_info[i].bssid,
				      pmksa->bssid))
			continue;

		memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
		       WLAN_PMKID_LEN);

		return pmkids;
	}

	/* out of space, return error */
	if (i == max_pmkids) {
		netdev_dbg(usbdev->net, "%s(): out of space\n", __func__);
		err = -ENOSPC;
		goto error;
	}

	/* add new pmkid */
	newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);

	new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
	if (!new_pmkids) {
		err = -ENOMEM;
		goto error;
	}
	pmkids = new_pmkids;

	pmkids->length = cpu_to_le32(newlen);
	pmkids->bssid_info_count = cpu_to_le32(count + 1);

	memcpy(pmkids->bssid_info[count].bssid, pmksa->bssid, ETH_ALEN);
	memcpy(pmkids->bssid_info[count].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
return pmkids; error: kfree(pmkids); return ERR_PTR(err); } /* * cfg80211 ops */ static int rndis_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); struct usbnet *usbdev = priv->usbdev; int mode; switch (type) { case NL80211_IFTYPE_ADHOC: mode = NDIS_80211_INFRA_ADHOC; break; case NL80211_IFTYPE_STATION: mode = NDIS_80211_INFRA_INFRA; break; default: return -EINVAL; } priv->wdev.iftype = type; return set_infra_mode(usbdev, mode); } static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); struct usbnet *usbdev = priv->usbdev; int err; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { err = set_frag_threshold(usbdev, wiphy->frag_threshold); if (err < 0) return err; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { err = set_rts_threshold(usbdev, wiphy->rts_threshold); if (err < 0) return err; } return 0; } static int rndis_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); struct usbnet *usbdev = priv->usbdev; netdev_dbg(usbdev->net, "%s(): type:0x%x mbm:%i\n", __func__, type, mbm); if (mbm < 0 || (mbm % 100)) return -ENOTSUPP; /* Device doesn't support changing txpower after initialization, only * turn off/on radio. Support 'auto' mode and setting same dBm that is * currently used. 
*/ if (type == NL80211_TX_POWER_AUTOMATIC || MBM_TO_DBM(mbm) == get_bcm4320_power_dbm(priv)) { if (!priv->radio_on) disassociate(usbdev, true); /* turn on radio */ return 0; } return -ENOTSUPP; } static int rndis_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int *dbm) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); struct usbnet *usbdev = priv->usbdev; *dbm = get_bcm4320_power_dbm(priv); netdev_dbg(usbdev->net, "%s(): dbm:%i\n", __func__, *dbm); return 0; } #define SCAN_DELAY_JIFFIES (6 * HZ) static int rndis_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct net_device *dev = request->wdev->netdev; struct usbnet *usbdev = netdev_priv(dev); struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); int ret; int delay = SCAN_DELAY_JIFFIES; netdev_dbg(usbdev->net, "cfg80211.scan\n"); /* Get current bssid list from device before new scan, as new scan * clears internal bssid list. */ rndis_check_bssid_list(usbdev, NULL, NULL); if (priv->scan_request && priv->scan_request != request) return -EBUSY; priv->scan_request = request; ret = rndis_start_bssid_list_scan(usbdev); if (ret == 0) { if (priv->device_type == RNDIS_BCM4320A) delay = HZ; /* Wait before retrieving scan results from device */ queue_delayed_work(priv->workqueue, &priv->scan_work, delay); } return ret; } static bool rndis_bss_info_update(struct usbnet *usbdev, struct ndis_80211_bssid_ex *bssid) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ieee80211_channel *channel; struct cfg80211_bss *bss; s32 signal; u64 timestamp; u16 capability; u16 beacon_interval; struct ndis_80211_fixed_ies *fixed; int ie_len, bssid_len; u8 *ie; netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM], len: %d\n", bssid->ssid.essid, bssid->mac, le32_to_cpu(bssid->length)); /* parse bssid structure */ bssid_len = le32_to_cpu(bssid->length); if (bssid_len < sizeof(struct ndis_80211_bssid_ex) + sizeof(struct ndis_80211_fixed_ies)) return NULL; fixed = (struct 
ndis_80211_fixed_ies *)bssid->ies; ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies)); ie_len = min(bssid_len - (int)sizeof(*bssid), (int)le32_to_cpu(bssid->ie_length)); ie_len -= sizeof(struct ndis_80211_fixed_ies); if (ie_len < 0) return NULL; /* extract data for cfg80211_inform_bss */ channel = ieee80211_get_channel(priv->wdev.wiphy, KHZ_TO_MHZ(le32_to_cpu(bssid->config.ds_config))); if (!channel) return NULL; signal = level_to_qual(le32_to_cpu(bssid->rssi)); timestamp = le64_to_cpu(*(__le64 *)fixed->timestamp); capability = le16_to_cpu(fixed->capabilities); beacon_interval = le16_to_cpu(fixed->beacon_interval); bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, bssid->mac, timestamp, capability, beacon_interval, ie, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(priv->wdev.wiphy, bss); return (bss != NULL); } static struct ndis_80211_bssid_ex *next_bssid_list_item( struct ndis_80211_bssid_ex *bssid, int *bssid_len, void *buf, int len) { void *buf_end, *bssid_end; buf_end = (char *)buf + len; bssid_end = (char *)bssid + *bssid_len; if ((int)(buf_end - bssid_end) < sizeof(bssid->length)) { *bssid_len = 0; return NULL; } else { bssid = (void *)((char *)bssid + *bssid_len); *bssid_len = le32_to_cpu(bssid->length); return bssid; } } static bool check_bssid_list_item(struct ndis_80211_bssid_ex *bssid, int bssid_len, void *buf, int len) { void *buf_end, *bssid_end; if (!bssid || bssid_len <= 0 || bssid_len > len) return false; buf_end = (char *)buf + len; bssid_end = (char *)bssid + bssid_len; return (int)(buf_end - bssid_end) >= 0 && (int)(bssid_end - buf) >= 0; } static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid, bool *matched) { void *buf = NULL; struct ndis_80211_bssid_list_ex *bssid_list; struct ndis_80211_bssid_ex *bssid; int ret = -EINVAL, len, count, bssid_len, real_count, new_len; netdev_dbg(usbdev->net, "%s()\n", __func__); len = CONTROL_BUFFER_SIZE; resize_buf: buf = kzalloc(len, GFP_KERNEL); 
if (!buf) { ret = -ENOMEM; goto out; } /* BSSID-list might have got bigger last time we checked, keep * resizing until it won't get any bigger. */ new_len = len; ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_BSSID_LIST, buf, &new_len); if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex)) goto out; if (new_len > len) { len = new_len; kfree(buf); goto resize_buf; } len = new_len; bssid_list = buf; count = le32_to_cpu(bssid_list->num_items); real_count = 0; netdev_dbg(usbdev->net, "%s(): buflen: %d\n", __func__, len); bssid_len = 0; bssid = next_bssid_list_item(bssid_list->bssid, &bssid_len, buf, len); /* Device returns incorrect 'num_items'. Workaround by ignoring the * received 'num_items' and walking through full bssid buffer instead. */ while (check_bssid_list_item(bssid, bssid_len, buf, len)) { if (rndis_bss_info_update(usbdev, bssid) && match_bssid && matched) { if (ether_addr_equal(bssid->mac, match_bssid)) *matched = true; } real_count++; bssid = next_bssid_list_item(bssid, &bssid_len, buf, len); } netdev_dbg(usbdev->net, "%s(): num_items from device: %d, really found:" " %d\n", __func__, count, real_count); out: kfree(buf); return ret; } static void rndis_get_scan_results(struct work_struct *work) { struct rndis_wlan_private *priv = container_of(work, struct rndis_wlan_private, scan_work.work); struct usbnet *usbdev = priv->usbdev; int ret; netdev_dbg(usbdev->net, "get_scan_results\n"); if (!priv->scan_request) return; ret = rndis_check_bssid_list(usbdev, NULL, NULL); cfg80211_scan_done(priv->scan_request, ret < 0); priv->scan_request = NULL; } static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); struct usbnet *usbdev = priv->usbdev; struct ieee80211_channel *channel = sme->channel; struct ndis_80211_ssid ssid; int pairwise = RNDIS_WLAN_ALG_NONE; int groupwise = RNDIS_WLAN_ALG_NONE; int keymgmt = RNDIS_WLAN_KEY_MGMT_NONE; int length, i, 
	    ret, chan = -1;

	if (channel)
		chan = ieee80211_frequency_to_channel(channel->center_freq);

	/* collapse the requested cipher/AKM suite lists into driver flags */
	groupwise = rndis_cipher_to_alg(sme->crypto.cipher_group);
	for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++)
		pairwise |= rndis_cipher_to_alg(sme->crypto.ciphers_pairwise[i]);

	if (sme->crypto.n_ciphers_pairwise > 0 &&
			pairwise == RNDIS_WLAN_ALG_NONE) {
		netdev_err(usbdev->net, "Unsupported pairwise cipher\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < sme->crypto.n_akm_suites; i++)
		keymgmt |= rndis_akm_suite_to_key_mgmt(sme->crypto.akm_suites[i]);

	if (sme->crypto.n_akm_suites > 0 &&
			keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) {
		netdev_err(usbdev->net, "Invalid keymgmt\n");
		return -ENOTSUPP;
	}

	netdev_dbg(usbdev->net, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:0x%x]:0x%x)\n",
		   sme->ssid, sme->bssid, chan, sme->privacy,
		   sme->crypto.wpa_versions, sme->auth_type,
		   groupwise, pairwise, keymgmt);

	if (is_associated(usbdev))
		disassociate(usbdev, false);

	ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "connect: set_infra_mode failed, %d\n",
			   ret);
		goto err_turn_radio_on;
	}

	ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type,
				keymgmt);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "connect: set_auth_mode failed, %d\n",
			   ret);
		goto err_turn_radio_on;
	}

	set_priv_filter(usbdev);

	ret = set_encr_mode(usbdev, pairwise, groupwise);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "connect: set_encr_mode failed, %d\n",
			   ret);
		goto err_turn_radio_on;
	}

	if (channel) {
		ret = set_channel(usbdev, chan);
		if (ret < 0) {
			netdev_dbg(usbdev->net, "connect: set_channel failed, %d\n",
				   ret);
			goto err_turn_radio_on;
		}
	}

	/* static WEP: install the key before programming the SSID */
	if (sme->key && ((groupwise | pairwise) & RNDIS_WLAN_ALG_WEP)) {
		priv->encr_tx_key_index = sme->key_idx;
		ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx);
		if (ret < 0) {
			netdev_dbg(usbdev->net, "connect: add_wep_key failed, %d (%d, %d)\n",
				   ret, sme->key_len, sme->key_idx);
			goto err_turn_radio_on;
		}
	}

	if (sme->bssid &&
	    !is_zero_ether_addr(sme->bssid) &&
	    !is_broadcast_ether_addr(sme->bssid)) {
		ret = set_bssid(usbdev, sme->bssid);
		if (ret < 0) {
			netdev_dbg(usbdev->net, "connect: set_bssid failed, %d\n",
				   ret);
			goto err_turn_radio_on;
		}
	} else
		clear_bssid(usbdev);

	length = sme->ssid_len;
	if (length > NDIS_802_11_LENGTH_SSID)
		length = NDIS_802_11_LENGTH_SSID;

	memset(&ssid, 0, sizeof(ssid));
	ssid.length = cpu_to_le32(length);
	memcpy(ssid.essid, sme->ssid, length);

	/* Pause and purge rx queue, so we don't pass packets before
	 * 'media connect'-indication.
	 */
	usbnet_pause_rx(usbdev);
	usbnet_purge_paused_rxq(usbdev);

	/* writing the SSID triggers the actual association attempt */
	ret = set_essid(usbdev, &ssid);
	if (ret < 0)
		netdev_dbg(usbdev->net, "connect: set_essid failed, %d\n", ret);
	return ret;

err_turn_radio_on:
	disassociate(usbdev, true);

	return ret;
}

/*
 * cfg80211 disconnect handler: drop the cached association state and
 * deauthenticate from the device.
 */
static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
			    u16 reason_code)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;

	netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);

	priv->connected = false;
	eth_zero_addr(priv->bssid);

	return deauthenticate(usbdev);
}

/*
 * cfg80211 join_ibss handler: configure ad-hoc mode, auth/encryption,
 * channel and BSSID, then program the SSID to start/join the IBSS.
 */
static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
			   struct cfg80211_ibss_params *params)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;
	struct ieee80211_channel *channel = params->chandef.chan;
	struct ndis_80211_ssid ssid;
	enum nl80211_auth_type auth_type;
	int ret, alg, length, chan = -1;

	if (channel)
		chan = ieee80211_frequency_to_channel(channel->center_freq);

	/* TODO: How to handle ad-hoc encryption?
	 * connect() has *key, join_ibss() doesn't. RNDIS requires key to be
	 * pre-shared for encryption (open/shared/wpa), is key set before
	 * join_ibss? Which auth_type to use (not in params)? What about WPA?
	 */
	if (params->privacy) {
		auth_type = NL80211_AUTHTYPE_SHARED_KEY;
		alg = RNDIS_WLAN_ALG_WEP;
	} else {
		auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
		alg = RNDIS_WLAN_ALG_NONE;
	}

	netdev_dbg(usbdev->net, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)\n",
		   params->ssid, params->bssid, chan, params->privacy);

	if (is_associated(usbdev))
		disassociate(usbdev, false);

	ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "join_ibss: set_infra_mode failed, %d\n",
			   ret);
		goto err_turn_radio_on;
	}

	ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "join_ibss: set_auth_mode failed, %d\n",
			   ret);
		goto err_turn_radio_on;
	}

	set_priv_filter(usbdev);

	ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE);
	if (ret < 0) {
		netdev_dbg(usbdev->net, "join_ibss: set_encr_mode failed, %d\n",
			   ret);
		goto err_turn_radio_on;
	}

	if (channel) {
		ret = set_channel(usbdev, chan);
		if (ret < 0) {
			netdev_dbg(usbdev->net, "join_ibss: set_channel failed, %d\n",
				   ret);
			goto err_turn_radio_on;
		}
	}

	if (params->bssid &&
	    !is_zero_ether_addr(params->bssid) &&
	    !is_broadcast_ether_addr(params->bssid)) {
		ret = set_bssid(usbdev, params->bssid);
		if (ret < 0) {
			netdev_dbg(usbdev->net, "join_ibss: set_bssid failed, %d\n",
				   ret);
			goto err_turn_radio_on;
		}
	} else
		clear_bssid(usbdev);

	length = params->ssid_len;
	if (length > NDIS_802_11_LENGTH_SSID)
		length = NDIS_802_11_LENGTH_SSID;

	memset(&ssid, 0, sizeof(ssid));
	ssid.length = cpu_to_le32(length);
	memcpy(ssid.essid, params->ssid, length);

	/* Don't need to pause rx queue for ad-hoc.
	 */
	usbnet_purge_paused_rxq(usbdev);
	usbnet_resume_rx(usbdev);

	/* writing the SSID starts/joins the IBSS */
	ret = set_essid(usbdev, &ssid);
	if (ret < 0)
		netdev_dbg(usbdev->net, "join_ibss: set_essid failed, %d\n",
			   ret);
	return ret;

err_turn_radio_on:
	disassociate(usbdev, true);

	return ret;
}

/*
 * cfg80211 leave_ibss handler: drop cached association state and
 * deauthenticate from the device.
 */
static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;

	netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");

	priv->connected = false;
	eth_zero_addr(priv->bssid);

	return deauthenticate(usbdev);
}

/*
 * cfg80211 add_key handler: dispatch WEP keys to add_wep_key() and
 * TKIP/CCMP keys to add_wpa_key() with the matching NDIS flags.
 */
static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
			 u8 key_index, bool pairwise, const u8 *mac_addr,
			 struct key_params *params)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;
	__le32 flags;

	netdev_dbg(usbdev->net, "%s(%i, %pM, %08x)\n",
		   __func__, key_index, mac_addr, params->cipher);

	switch (params->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return add_wep_key(usbdev, params->key, params->key_len,
				   key_index);
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
		flags = 0;

		if (params->seq && params->seq_len > 0)
			flags |= NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ;
		if (mac_addr)
			flags |= NDIS_80211_ADDKEY_PAIRWISE_KEY |
					NDIS_80211_ADDKEY_TRANSMIT_KEY;

		return add_wpa_key(usbdev, params->key, params->key_len,
				key_index, mac_addr, params->seq,
				params->seq_len, params->cipher, flags);
	default:
		netdev_dbg(usbdev->net, "%s(): unsupported cipher %08x\n",
			   __func__, params->cipher);
		return -ENOTSUPP;
	}
}

/* cfg80211 del_key handler: remove a WEP or WPA key by index. */
static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
			 u8 key_index, bool pairwise, const u8 *mac_addr)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;

	netdev_dbg(usbdev->net, "%s(%i, %pM)\n", __func__, key_index, mac_addr);

	return remove_key(usbdev, key_index, mac_addr);
}

/*
 * cfg80211 set_default_key handler: remember the transmit key index
 * and, for cached WEP keys, reprogram the key as the transmit key.
 */
static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
				 u8 key_index, bool unicast,
				 bool multicast)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;
	struct rndis_wlan_encr_key key;

	netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);

	if (key_index >= RNDIS_WLAN_NUM_KEYS)
		return -ENOENT;

	priv->encr_tx_key_index = key_index;

	/* WPA keys carry their TX role when added; nothing more to do. */
	if (is_wpa_key(priv, key_index))
		return 0;

	key = priv->encr_keys[key_index];

	/* Re-adding the WEP key makes the device use it for TX. */
	return add_wep_key(usbdev, key.material, key.len, key_index);
}

/* Fill sinfo with the current link speed and signal quality queried from
 * the device; fields are only marked "filled" when the query succeeds. */
static void rndis_fill_station_info(struct usbnet *usbdev,
				    struct station_info *sinfo)
{
	__le32 linkspeed, rssi;
	int ret, len;

	memset(sinfo, 0, sizeof(*sinfo));

	len = sizeof(linkspeed);
	ret = rndis_query_oid(usbdev, RNDIS_OID_GEN_LINK_SPEED,
			      &linkspeed, &len);
	if (ret == 0) {
		/* device reports in 100 bps units; txrate.legacy is
		 * 100 kbps units */
		sinfo->txrate.legacy = le32_to_cpu(linkspeed) / 1000;
		sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
	}

	len = sizeof(rssi);
	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
			      &rssi, &len);
	if (ret == 0) {
		sinfo->signal = level_to_qual(le32_to_cpu(rssi));
		sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
	}
}

/* cfg80211 .get_station handler: only the associated AP (priv->bssid) is
 * known as a "station". */
static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
			     const u8 *mac, struct station_info *sinfo)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;

	if (!ether_addr_equal(priv->bssid, mac))
		return -ENOENT;

	rndis_fill_station_info(usbdev, sinfo);

	return 0;
}

/* cfg80211 .dump_station handler: a single entry (idx 0), the current
 * BSSID. */
static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
			      int idx, u8 *mac, struct station_info *sinfo)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;

	if (idx != 0)
		return -ENOENT;

	memcpy(mac, priv->bssid, ETH_ALEN);

	rndis_fill_station_info(usbdev, sinfo);

	return 0;
}

/* cfg80211 .set_pmksa handler: read the device PMKID cache, merge in the
 * new entry, and write the cache back. */
static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
			   struct cfg80211_pmksa *pmksa)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;
	struct ndis_80211_pmkid *pmkids;
	u32 *tmp = (u32 *)pmksa->pmkid;

	netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n",
		   __func__, pmksa->bssid,
		   cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
		   cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));

	pmkids = get_device_pmkids(usbdev);
	if (IS_ERR(pmkids)) {
		/* couldn't read PMKID cache from device */
		return PTR_ERR(pmkids);
	}

	pmkids = update_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
	if (IS_ERR(pmkids)) {
		/* not found, list full, etc */
		return PTR_ERR(pmkids);
	}

	return set_device_pmkids(usbdev, pmkids);
}

/* cfg80211 .del_pmksa handler: read the device PMKID cache, remove the
 * entry, and write the cache back. */
static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
			   struct cfg80211_pmksa *pmksa)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;
	struct ndis_80211_pmkid *pmkids;
	u32 *tmp = (u32 *)pmksa->pmkid;

	netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n",
		   __func__, pmksa->bssid,
		   cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
		   cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));

	pmkids = get_device_pmkids(usbdev);
	if (IS_ERR(pmkids)) {
		/* Couldn't read PMKID cache from device */
		return PTR_ERR(pmkids);
	}

	pmkids = remove_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
	if (IS_ERR(pmkids)) {
		/* not found, etc */
		return PTR_ERR(pmkids);
	}

	return set_device_pmkids(usbdev, pmkids);
}

/* cfg80211 .flush_pmksa handler: write an empty PMKID list to the device. */
static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;
	struct ndis_80211_pmkid pmkid;

	netdev_dbg(usbdev->net, "%s()\n", __func__);

	memset(&pmkid, 0, sizeof(pmkid));

	pmkid.length = cpu_to_le32(sizeof(pmkid));
	pmkid.bssid_info_count = cpu_to_le32(0);

	return rndis_set_oid(usbdev, RNDIS_OID_802_11_PMKID,
			     &pmkid, sizeof(pmkid));
}

/* cfg80211 .set_power_mgmt handler: map enabled/disabled onto the NDIS
 * FAST_PSP/CAM power modes. Only supported on BCM4320b devices. */
static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
				bool enabled, int timeout)
{
	struct rndis_wlan_private *priv = wiphy_priv(wiphy);
	struct usbnet *usbdev = priv->usbdev;
	int power_mode;
	__le32 mode;
	int ret;

	if (priv->device_type != RNDIS_BCM4320B)
		return -ENOTSUPP;

	netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, enabled ?
"enabled" : "disabled", timeout); if (enabled) power_mode = NDIS_80211_POWER_MODE_FAST_PSP; else power_mode = NDIS_80211_POWER_MODE_CAM; if (power_mode == priv->power_mode) return 0; priv->power_mode = power_mode; mode = cpu_to_le32(power_mode); ret = rndis_set_oid(usbdev, RNDIS_OID_802_11_POWER_MODE, &mode, sizeof(mode)); netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_POWER_MODE -> %d\n", __func__, ret); return ret; } static int rndis_set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { struct rndis_wlan_private *priv = wiphy_priv(wiphy); priv->cqm_rssi_thold = rssi_thold; priv->cqm_rssi_hyst = rssi_hyst; priv->last_cqm_event_rssi = 0; return 0; } static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, struct ndis_80211_assoc_info *info) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ieee80211_channel *channel; struct ndis_80211_ssid ssid; struct cfg80211_bss *bss; s32 signal; u64 timestamp; u16 capability; u32 beacon_period = 0; __le32 rssi; u8 ie_buf[34]; int len, ret, ie_len; /* Get signal quality, in case of error use rssi=0 and ignore error. */ len = sizeof(rssi); rssi = 0; ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI, &rssi, &len); signal = level_to_qual(le32_to_cpu(rssi)); netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_RSSI -> %d, " "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi))); /* Get AP capabilities */ if (info) { capability = le16_to_cpu(info->resp_ie.capa); } else { /* Set atleast ESS/IBSS capability */ capability = (priv->infra_mode == NDIS_80211_INFRA_INFRA) ? WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS; } /* Get channel and beacon interval */ channel = get_current_channel(usbdev, &beacon_period); if (!channel) { netdev_warn(usbdev->net, "%s(): could not get channel.\n", __func__); return; } /* Get SSID, in case of error, use zero length SSID and ignore error. 
*/ len = sizeof(ssid); memset(&ssid, 0, sizeof(ssid)); ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_SSID, &ssid, &len); netdev_dbg(usbdev->net, "%s(): RNDIS_OID_802_11_SSID -> %d, len: %d, ssid: " "'%.32s'\n", __func__, ret, le32_to_cpu(ssid.length), ssid.essid); if (le32_to_cpu(ssid.length) > 32) ssid.length = cpu_to_le32(32); ie_buf[0] = WLAN_EID_SSID; ie_buf[1] = le32_to_cpu(ssid.length); memcpy(&ie_buf[2], ssid.essid, le32_to_cpu(ssid.length)); ie_len = le32_to_cpu(ssid.length) + 2; /* no tsf */ timestamp = 0; netdev_dbg(usbdev->net, "%s(): channel:%d(freq), bssid:[%pM], tsf:%d, " "capa:%x, beacon int:%d, resp_ie(len:%d, essid:'%.32s'), " "signal:%d\n", __func__, (channel ? channel->center_freq : -1), bssid, (u32)timestamp, capability, beacon_period, ie_len, ssid.essid, signal); bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, bssid, timestamp, capability, beacon_period, ie_buf, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(priv->wdev.wiphy, bss); } /* * workers, indication handlers, device poller */ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); struct ndis_80211_assoc_info *info = NULL; u8 bssid[ETH_ALEN]; unsigned int resp_ie_len, req_ie_len; unsigned int offset; u8 *req_ie, *resp_ie; int ret; bool roamed = false; bool match_bss; if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) { /* received media connect indication while connected, either * device reassociated with same AP or roamed to new. */ roamed = true; } req_ie_len = 0; resp_ie_len = 0; req_ie = NULL; resp_ie = NULL; if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { info = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL); if (!info) { /* No memory? Try resume work later */ set_bit(WORK_LINK_UP, &priv->work_pending); queue_work(priv->workqueue, &priv->work); return; } /* Get association info IEs from device. 
		 */
		ret = get_association_info(usbdev, info, CONTROL_BUFFER_SIZE);
		if (!ret) {
			/* Clamp device-supplied lengths/offsets so the IE
			 * pointers never reach outside the control buffer. */
			req_ie_len = le32_to_cpu(info->req_ie_length);
			if (req_ie_len > CONTROL_BUFFER_SIZE)
				req_ie_len = CONTROL_BUFFER_SIZE;
			if (req_ie_len != 0) {
				offset = le32_to_cpu(info->offset_req_ies);

				if (offset > CONTROL_BUFFER_SIZE)
					offset = CONTROL_BUFFER_SIZE;

				req_ie = (u8 *)info + offset;

				if (offset + req_ie_len > CONTROL_BUFFER_SIZE)
					req_ie_len =
						CONTROL_BUFFER_SIZE - offset;
			}

			resp_ie_len = le32_to_cpu(info->resp_ie_length);
			if (resp_ie_len > CONTROL_BUFFER_SIZE)
				resp_ie_len = CONTROL_BUFFER_SIZE;
			if (resp_ie_len != 0) {
				offset = le32_to_cpu(info->offset_resp_ies);

				if (offset > CONTROL_BUFFER_SIZE)
					offset = CONTROL_BUFFER_SIZE;

				resp_ie = (u8 *)info + offset;

				if (offset + resp_ie_len > CONTROL_BUFFER_SIZE)
					resp_ie_len =
						CONTROL_BUFFER_SIZE - offset;
			}
		} else {
			/* Since rndis_wlan_craft_connected_bss() might use info
			 * later and expects info to contain valid data if
			 * non-null, free info and set NULL here.
			 */
			kfree(info);
			info = NULL;
		}
	} else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
		return;

	ret = get_bssid(usbdev, bssid);
	if (ret < 0)
		memset(bssid, 0, sizeof(bssid));

	netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
		   bssid, roamed ? " roamed" : "");

	/* Internal bss list in device should contain at least the currently
	 * connected bss and we can get it to cfg80211 with
	 * rndis_check_bssid_list().
	 *
	 * NDIS spec says: "If the device is associated, but the associated
	 * BSSID is not in its BSSID scan list, then the driver must add an
	 * entry for the BSSID at the end of the data that it returns in
	 * response to query of RNDIS_OID_802_11_BSSID_LIST."
	 *
	 * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a.
	 */
	match_bss = false;
	rndis_check_bssid_list(usbdev, bssid, &match_bss);

	if (!is_zero_ether_addr(bssid) && !match_bss) {
		/* Couldn't get bss from device, we need to manually craft bss
		 * for cfg80211. */
		rndis_wlan_craft_connected_bss(usbdev, bssid, info);
	}

	if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
		if (!roamed)
			cfg80211_connect_result(usbdev->net, bssid, req_ie,
						req_ie_len, resp_ie,
						resp_ie_len, 0, GFP_KERNEL);
		else
			cfg80211_roamed(usbdev->net,
					get_current_channel(usbdev, NULL),
					bssid, req_ie, req_ie_len,
					resp_ie, resp_ie_len, GFP_KERNEL);
	} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
		cfg80211_ibss_joined(usbdev->net, bssid,
				     get_current_channel(usbdev, NULL),
				     GFP_KERNEL);

	kfree(info);

	priv->connected = true;
	memcpy(priv->bssid, bssid, ETH_ALEN);

	usbnet_resume_rx(usbdev);
	netif_carrier_on(usbdev->net);
}

/* Handle a "media disconnect" indication: tear down connection state and
 * notify cfg80211 (only if we believed we were connected). */
static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);

	if (priv->connected) {
		priv->connected = false;
		eth_zero_addr(priv->bssid);

		deauthenticate(usbdev);

		cfg80211_disconnected(usbdev->net, 0, NULL, 0, true,
				      GFP_KERNEL);
	}

	netif_carrier_off(usbdev->net);
}

/* Workqueue handler: dispatch whichever work bits were set by the
 * indication handlers (which must not call rndis_command() directly). */
static void rndis_wlan_worker(struct work_struct *work)
{
	struct rndis_wlan_private *priv =
		container_of(work, struct rndis_wlan_private, work);
	struct usbnet *usbdev = priv->usbdev;

	if (test_and_clear_bit(WORK_LINK_UP, &priv->work_pending))
		rndis_wlan_do_link_up_work(usbdev);

	if (test_and_clear_bit(WORK_LINK_DOWN, &priv->work_pending))
		rndis_wlan_do_link_down_work(usbdev);

	if (test_and_clear_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
		set_multicast_list(usbdev);
}

/* ndo_set_rx_mode callback: defer the actual OID traffic to the
 * workqueue since this can be called from atomic context. */
static void rndis_wlan_set_multicast_list(struct net_device *dev)
{
	struct usbnet *usbdev = netdev_priv(dev);
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);

	if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
		return;

	set_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending);
	queue_work(priv->workqueue, &priv->work);
}

/* Parse an NDIS authentication status indication: log each auth request
 * entry and forward Michael MIC failures to cfg80211. */
static void rndis_wlan_auth_indication(struct usbnet *usbdev,
			struct ndis_80211_status_indication *indication,
			int len)
{
	u8 *buf;
	const char *type;
	int flags, buflen, key_id;
	bool pairwise_error, group_error;
	struct
ndis_80211_auth_request *auth_req; enum nl80211_key_type key_type; /* must have at least one array entry */ if (len < offsetof(struct ndis_80211_status_indication, u) + sizeof(struct ndis_80211_auth_request)) { netdev_info(usbdev->net, "authentication indication: too short message (%i)\n", len); return; } buf = (void *)&indication->u.auth_request[0]; buflen = len - offsetof(struct ndis_80211_status_indication, u); while (buflen >= sizeof(*auth_req)) { auth_req = (void *)buf; type = "unknown"; flags = le32_to_cpu(auth_req->flags); pairwise_error = false; group_error = false; if (flags & 0x1) type = "reauth request"; if (flags & 0x2) type = "key update request"; if (flags & 0x6) { pairwise_error = true; type = "pairwise_error"; } if (flags & 0xe) { group_error = true; type = "group_error"; } netdev_info(usbdev->net, "authentication indication: %s (0x%08x)\n", type, le32_to_cpu(auth_req->flags)); if (pairwise_error) { key_type = NL80211_KEYTYPE_PAIRWISE; key_id = -1; cfg80211_michael_mic_failure(usbdev->net, auth_req->bssid, key_type, key_id, NULL, GFP_KERNEL); } if (group_error) { key_type = NL80211_KEYTYPE_GROUP; key_id = -1; cfg80211_michael_mic_failure(usbdev->net, auth_req->bssid, key_type, key_id, NULL, GFP_KERNEL); } buflen -= le32_to_cpu(auth_req->length); buf += le32_to_cpu(auth_req->length); } } static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev, struct ndis_80211_status_indication *indication, int len) { struct ndis_80211_pmkid_cand_list *cand_list; int list_len, expected_len, i; if (len < offsetof(struct ndis_80211_status_indication, u) + sizeof(struct ndis_80211_pmkid_cand_list)) { netdev_info(usbdev->net, "pmkid candidate list indication: too short message (%i)\n", len); return; } list_len = le32_to_cpu(indication->u.cand_list.num_candidates) * sizeof(struct ndis_80211_pmkid_candidate); expected_len = sizeof(struct ndis_80211_pmkid_cand_list) + list_len + offsetof(struct ndis_80211_status_indication, u); if (len < expected_len) { 
		netdev_info(usbdev->net, "pmkid candidate list indication: list larger than buffer (%i < %i)\n",
			    len, expected_len);
		return;
	}

	cand_list = &indication->u.cand_list;

	netdev_info(usbdev->net, "pmkid candidate list indication: version %i, candidates %i\n",
		    le32_to_cpu(cand_list->version),
		    le32_to_cpu(cand_list->num_candidates));

	/* only version 1 candidate lists are understood */
	if (le32_to_cpu(cand_list->version) != 1)
		return;

	for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
		struct ndis_80211_pmkid_candidate *cand =
						&cand_list->candidate_list[i];
		bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);

		netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
			   i, le32_to_cpu(cand->flags), preauth, cand->bssid);

		cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
						preauth, GFP_ATOMIC);
	}
}

/* Validate and dispatch an 802.11 media-specific status indication
 * embedded in an RNDIS indicate message. */
static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
		struct rndis_indicate *msg, int buflen)
{
	struct ndis_80211_status_indication *indication;
	unsigned int len, offset;

	offset = offsetof(struct rndis_indicate, status) +
			le32_to_cpu(msg->offset);
	len = le32_to_cpu(msg->length);

	if (len < 8) {
		netdev_info(usbdev->net, "media specific indication, ignore too short message (%i < 8)\n",
			    len);
		return;
	}

	/* reject indications whose claimed payload falls outside the
	 * received buffer */
	if (len > buflen || offset > buflen || offset + len > buflen) {
		netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n",
			    offset + len, buflen);
		return;
	}

	indication = (void *)((u8 *)msg + offset);

	switch (le32_to_cpu(indication->status_type)) {
	case NDIS_80211_STATUSTYPE_RADIOSTATE:
		netdev_info(usbdev->net, "radio state indication: %i\n",
			    le32_to_cpu(indication->u.radio_status));
		return;

	case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE:
		netdev_info(usbdev->net, "media stream mode indication: %i\n",
			    le32_to_cpu(indication->u.media_stream_mode));
		return;

	case NDIS_80211_STATUSTYPE_AUTHENTICATION:
		rndis_wlan_auth_indication(usbdev, indication, len);
		return;

	case NDIS_80211_STATUSTYPE_PMKID_CANDIDATELIST:
		rndis_wlan_pmkid_cand_list_indication(usbdev, indication, len);
		return;

	default:
		netdev_info(usbdev->net, "media specific indication: unknown status type 0x%08x\n",
			    le32_to_cpu(indication->status_type));
	}
}

/* usbnet .indication callback: handle RNDIS status indications. Connect
 * and disconnect are deferred to the workqueue because handling them
 * requires rndis_command() calls that cannot be made from here. */
static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	struct rndis_indicate *msg = ind;

	switch (le32_to_cpu(msg->status)) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (priv->current_command_oid == RNDIS_OID_802_11_ADD_KEY) {
			/* RNDIS_OID_802_11_ADD_KEY causes sometimes extra
			 * "media connect" indications which confuses driver
			 * and userspace to think that device is
			 * roaming/reassociating when it isn't.
			 */
			netdev_dbg(usbdev->net, "ignored RNDIS_OID_802_11_ADD_KEY triggered 'media connect'\n");
			return;
		}

		usbnet_pause_rx(usbdev);

		netdev_info(usbdev->net, "media connect\n");

		/* queue work to avoid recursive calls into rndis_command */
		set_bit(WORK_LINK_UP, &priv->work_pending);
		queue_work(priv->workqueue, &priv->work);
		break;

	case RNDIS_STATUS_MEDIA_DISCONNECT:
		netdev_info(usbdev->net, "media disconnect\n");

		/* queue work to avoid recursive calls into rndis_command */
		set_bit(WORK_LINK_DOWN, &priv->work_pending);
		queue_work(priv->workqueue, &priv->work);
		break;

	case RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION:
		rndis_wlan_media_specific_indication(usbdev, msg, buflen);
		break;

	default:
		netdev_info(usbdev->net, "indication: 0x%08x\n",
			    le32_to_cpu(msg->status));
		break;
	}
}

/* Query the device for supported network types (-> priv->caps mode bits)
 * and 802.11 capabilities (-> wiphy->max_num_pmkids). */
static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
{
	struct {
		__le32 num_items;
		__le32 items[8];
	} networks_supported;
	struct ndis_80211_capability *caps;
	u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16];
	int len, retval, i, n;
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);

	/* determine supported modes */
	len = sizeof(networks_supported);
	retval = rndis_query_oid(usbdev,
				 RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED,
				 &networks_supported, &len);
	if (retval >= 0) {
		n =
le32_to_cpu(networks_supported.num_items);
		if (n > 8)
			n = 8;
		for (i = 0; i < n; i++) {
			switch (le32_to_cpu(networks_supported.items[i])) {
			case NDIS_80211_TYPE_FREQ_HOP:
			case NDIS_80211_TYPE_DIRECT_SEQ:
				priv->caps |= CAP_MODE_80211B;
				break;
			case NDIS_80211_TYPE_OFDM_A:
				priv->caps |= CAP_MODE_80211A;
				break;
			case NDIS_80211_TYPE_OFDM_G:
				priv->caps |= CAP_MODE_80211G;
				break;
			}
		}
	}

	/* get device 802.11 capabilities, number of PMKIDs */
	caps = (struct ndis_80211_capability *)caps_buf;
	len = sizeof(caps_buf);
	retval = rndis_query_oid(usbdev,
				 RNDIS_OID_802_11_CAPABILITY,
				 caps, &len);
	if (retval >= 0) {
		netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, "
			   "ver %d, pmkids %d, auth-encr-pairs %d\n",
			   le32_to_cpu(caps->length),
			   le32_to_cpu(caps->version),
			   le32_to_cpu(caps->num_pmkids),
			   le32_to_cpu(caps->num_auth_encr_pair));
		wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids);
	} else
		wiphy->max_num_pmkids = 0;

	return retval;
}

/* Evaluate the configured CQM RSSI threshold against the latest RSSI
 * sample and emit a LOW/HIGH event to cfg80211 when a threshold is
 * crossed (with hysteresis). Only meaningful in infrastructure mode and
 * for negative (dBm-like) values. */
static void rndis_do_cqm(struct usbnet *usbdev, s32 rssi)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	enum nl80211_cqm_rssi_threshold_event event;
	int thold, hyst, last_event;

	if (priv->cqm_rssi_thold >= 0 || rssi >= 0)
		return;
	if (priv->infra_mode != NDIS_80211_INFRA_INFRA)
		return;

	last_event = priv->last_cqm_event_rssi;
	thold = priv->cqm_rssi_thold;
	hyst = priv->cqm_rssi_hyst;

	if (rssi < thold && (last_event == 0 || rssi < last_event - hyst))
		event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
	else if (rssi > thold && (last_event == 0 || rssi > last_event + hyst))
		event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
	else
		return;

	priv->last_cqm_event_rssi = rssi;
	cfg80211_cqm_rssi_notify(usbdev->net, event, GFP_KERNEL);
}

#define DEVICE_POLLER_JIFFIES (HZ)
/* Periodic device poller (delayed work): samples RSSI for CQM and works
 * around device quirks (BCM4320a background scanning, transfer stalls on
 * poor links). Re-queues itself at the end. */
static void rndis_device_poller(struct work_struct *work)
{
	struct rndis_wlan_private *priv =
		container_of(work, struct rndis_wlan_private,
			     dev_poller_work.work);
	struct usbnet *usbdev = priv->usbdev;
	__le32 rssi, tmp;
	int len, ret, j;
	int update_jiffies = DEVICE_POLLER_JIFFIES;
	void *buf;

	/* Only check/do workaround when connected. Calling is_associated()
	 * also polls device with rndis_command() and catches for media link
	 * indications.
	 */
	if (!is_associated(usbdev)) {
		/* Workaround bad scanning in BCM4320a devices with active
		 * background scanning when not associated.
		 */
		if (priv->device_type == RNDIS_BCM4320A && priv->radio_on &&
		    !priv->scan_request) {
			/* Get previous scan results */
			rndis_check_bssid_list(usbdev, NULL, NULL);

			/* Initiate new scan */
			rndis_start_bssid_list_scan(usbdev);
		}

		goto end;
	}

	len = sizeof(rssi);
	ret = rndis_query_oid(usbdev, RNDIS_OID_802_11_RSSI,
			      &rssi, &len);
	if (ret == 0) {
		priv->last_qual = level_to_qual(le32_to_cpu(rssi));
		rndis_do_cqm(usbdev, le32_to_cpu(rssi));
	}

	netdev_dbg(usbdev->net, "dev-poller: RNDIS_OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
		   ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));

	/* Workaround transfer stalls on poor quality links.
	 * TODO: find right way to fix these stalls (as stalls do not happen
	 * with ndiswrapper/windows driver). */
	if (priv->param_workaround_interval > 0 && priv->last_qual <= 25) {
		/* Decrease stats worker interval to catch stalls.
		 * faster. Faster than 400-500ms causes packet loss,
		 * Slower doesn't catch stalls fast enough.
		 */
		j = msecs_to_jiffies(priv->param_workaround_interval);
		if (j > DEVICE_POLLER_JIFFIES)
			j = DEVICE_POLLER_JIFFIES;
		else if (j <= 0)
			j = 1;
		update_jiffies = j;

		/* Send scan OID. Use of both OIDs is required to get device
		 * working.
		 */
		tmp = cpu_to_le32(1);
		rndis_set_oid(usbdev,
			      RNDIS_OID_802_11_BSSID_LIST_SCAN,
			      &tmp, sizeof(tmp));

		len = CONTROL_BUFFER_SIZE;
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			goto end;

		rndis_query_oid(usbdev,
				RNDIS_OID_802_11_BSSID_LIST,
				buf, &len);
		kfree(buf);
	}

end:
	/* Align the next run to a jiffy boundary when that doesn't move
	 * the interval by more than a small amount. */
	if (update_jiffies >= HZ)
		update_jiffies = round_jiffies_relative(update_jiffies);
	else {
		j = round_jiffies_relative(update_jiffies);
		if (abs(j - update_jiffies) <= 10)
			update_jiffies = j;
	}

	queue_delayed_work(priv->workqueue, &priv->dev_poller_work,
			   update_jiffies);
}

/*
 * driver/device initialization
 */

/* Copy and sanitize the module parameters into the per-device private
 * structure, clamping each value to its valid range. */
static void rndis_copy_module_params(struct usbnet *usbdev, int device_type)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);

	priv->device_type = device_type;

	priv->param_country[0] = modparam_country[0];
	priv->param_country[1] = modparam_country[1];
	priv->param_country[2] = 0;
	priv->param_frameburst = modparam_frameburst;
	priv->param_afterburner = modparam_afterburner;
	priv->param_power_save = modparam_power_save;
	priv->param_power_output = modparam_power_output;
	priv->param_roamtrigger = modparam_roamtrigger;
	priv->param_roamdelta = modparam_roamdelta;

	priv->param_country[0] = toupper(priv->param_country[0]);
	priv->param_country[1] = toupper(priv->param_country[1]);
	/* doesn't support EU as country code, use FI instead */
	if (!strcmp(priv->param_country, "EU"))
		strcpy(priv->param_country, "FI");

	if (priv->param_power_save < 0)
		priv->param_power_save = 0;
	else if (priv->param_power_save > 2)
		priv->param_power_save = 2;

	if (priv->param_power_output < 0)
		priv->param_power_output = 0;
	else if (priv->param_power_output > 3)
		priv->param_power_output = 3;

	/* roam trigger is a dBm value, valid range -80..-60 */
	if (priv->param_roamtrigger < -80)
		priv->param_roamtrigger = -80;
	else if (priv->param_roamtrigger > -60)
		priv->param_roamtrigger = -60;

	if (priv->param_roamdelta < 0)
		priv->param_roamdelta = 0;
	else if (priv->param_roamdelta > 2)
		priv->param_roamdelta = 2;

	if (modparam_workaround_interval < 0)
		priv->param_workaround_interval = 500;
	else
		priv->param_workaround_interval =
					modparam_workaround_interval;
}

/* usbnet .early_init for unidentified RNDIS wireless devices. */
static int unknown_early_init(struct usbnet *usbdev)
{
	/* copy module parameters for unknown so that iwconfig reports txpower
	 * and workaround parameter is copied to private structure correctly.
	 */
	rndis_copy_module_params(usbdev, RNDIS_UNKNOWN);

	/* This is unknown device, so do not try set configuration parameters.
	 */
	return 0;
}

/* usbnet .early_init for BCM4320a based devices. */
static int bcm4320a_early_init(struct usbnet *usbdev)
{
	/* copy module parameters for bcm4320a so that iwconfig reports txpower
	 * and workaround parameter is copied to private structure correctly.
	 */
	rndis_copy_module_params(usbdev, RNDIS_BCM4320A);

	/* bcm4320a doesn't handle configuration parameters well. Try
	 * set any and you get partially zeroed mac and broken device.
	 */
	return 0;
}

/* usbnet .early_init for BCM4320b based devices: push the module-parameter
 * driven configuration strings into the device. */
static int bcm4320b_early_init(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	char buf[8];

	rndis_copy_module_params(usbdev, RNDIS_BCM4320B);

	/* Early initialization settings, setting these won't have effect
	 * if called after generic_rndis_bind().
	 */
	rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
	rndis_set_config_parameter_str(usbdev, "FrameBursting",
				       priv->param_frameburst ? "1" : "0");
	rndis_set_config_parameter_str(usbdev, "Afterburner",
				       priv->param_afterburner ?
"1" : "0"); sprintf(buf, "%d", priv->param_power_save); rndis_set_config_parameter_str(usbdev, "PowerSaveMode", buf); sprintf(buf, "%d", priv->param_power_output); rndis_set_config_parameter_str(usbdev, "PwrOut", buf); sprintf(buf, "%d", priv->param_roamtrigger); rndis_set_config_parameter_str(usbdev, "RoamTrigger", buf); sprintf(buf, "%d", priv->param_roamdelta); rndis_set_config_parameter_str(usbdev, "RoamDelta", buf); return 0; } /* same as rndis_netdev_ops but with local multicast handler */ static const struct net_device_ops rndis_wlan_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = rndis_wlan_set_multicast_list, }; static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) { struct wiphy *wiphy; struct rndis_wlan_private *priv; int retval, len; __le32 tmp; /* allocate wiphy and rndis private data * NOTE: We only support a single virtual interface, so wiphy * and wireless_dev are somewhat synonymous for this device. */ wiphy = wiphy_new(&rndis_config_ops, sizeof(struct rndis_wlan_private)); if (!wiphy) return -ENOMEM; priv = wiphy_priv(wiphy); usbdev->net->ieee80211_ptr = &priv->wdev; priv->wdev.wiphy = wiphy; priv->wdev.iftype = NL80211_IFTYPE_STATION; /* These have to be initialized before calling generic_rndis_bind(). * Otherwise we'll be in big trouble in rndis_wlan_early_init(). 
*/ usbdev->driver_priv = priv; priv->usbdev = usbdev; mutex_init(&priv->command_lock); /* because rndis_command() sleeps we need to use workqueue */ priv->workqueue = create_singlethread_workqueue("rndis_wlan"); INIT_WORK(&priv->work, rndis_wlan_worker); INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); /* try bind rndis_host */ retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); if (retval < 0) goto fail; /* generic_rndis_bind set packet filter to multicast_all+ * promisc mode which doesn't work well for our devices (device * picks up rssi to closest station instead of to access point). * * rndis_host wants to avoid all OID as much as possible * so do promisc/multicast handling in rndis_wlan. */ usbdev->net->netdev_ops = &rndis_wlan_netdev_ops; tmp = cpu_to_le32(RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST); retval = rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &tmp, sizeof(tmp)); len = sizeof(tmp); retval = rndis_query_oid(usbdev, RNDIS_OID_802_3_MAXIMUM_LIST_SIZE, &tmp, &len); priv->multicast_size = le32_to_cpu(tmp); if (retval < 0 || priv->multicast_size < 0) priv->multicast_size = 0; if (priv->multicast_size > 0) usbdev->net->flags |= IFF_MULTICAST; else usbdev->net->flags &= ~IFF_MULTICAST; /* fill-out wiphy structure and register w/ cfg80211 */ memcpy(wiphy->perm_addr, usbdev->net->dev_addr, ETH_ALEN); wiphy->privid = rndis_wiphy_privid; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); wiphy->max_scan_ssids = 1; /* TODO: fill-out band/encr information based on priv->caps */ rndis_wlan_get_caps(usbdev, wiphy); memcpy(priv->channels, rndis_channels, sizeof(rndis_channels)); memcpy(priv->rates, rndis_rates, sizeof(rndis_rates)); priv->band.channels = priv->channels; priv->band.n_channels = ARRAY_SIZE(rndis_channels); priv->band.bitrates = priv->rates; priv->band.n_bitrates = ARRAY_SIZE(rndis_rates); 
	wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
	wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;

	memcpy(priv->cipher_suites, rndis_cipher_suites,
	       sizeof(rndis_cipher_suites));
	wiphy->cipher_suites = priv->cipher_suites;
	wiphy->n_cipher_suites = ARRAY_SIZE(rndis_cipher_suites);

	set_wiphy_dev(wiphy, &usbdev->udev->dev);

	if (wiphy_register(wiphy)) {
		retval = -ENODEV;
		goto fail;
	}

	set_default_iw_params(usbdev);

	priv->power_mode = -1;

	/* set default rts/frag */
	rndis_set_wiphy_params(wiphy,
			WIPHY_PARAM_FRAG_THRESHOLD |
			WIPHY_PARAM_RTS_THRESHOLD);

	/* turn radio off on init */
	priv->radio_on = false;
	disassociate(usbdev, false);
	netif_carrier_off(usbdev->net);

	return 0;

fail:
	/* undo workqueue/work setup done before the failure */
	cancel_delayed_work_sync(&priv->dev_poller_work);
	cancel_delayed_work_sync(&priv->scan_work);
	cancel_work_sync(&priv->work);
	flush_workqueue(priv->workqueue);
	destroy_workqueue(priv->workqueue);

	wiphy_free(wiphy);
	return retval;
}

/* usbnet .unbind callback: stop all deferred work, unbind rndis_host and
 * release the wiphy. */
static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);

	/* turn radio off */
	disassociate(usbdev, false);

	cancel_delayed_work_sync(&priv->dev_poller_work);
	cancel_delayed_work_sync(&priv->scan_work);
	cancel_work_sync(&priv->work);
	flush_workqueue(priv->workqueue);
	destroy_workqueue(priv->workqueue);

	rndis_unbind(usbdev, intf);

	wiphy_unregister(priv->wdev.wiphy);
	wiphy_free(priv->wdev.wiphy);
}

/* usbnet .reset callback: reset the device, restore the multicast list
 * that the reset cleared and restart the poller. */
static int rndis_wlan_reset(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	int retval;

	netdev_dbg(usbdev->net, "%s()\n", __func__);

	retval = rndis_reset(usbdev);
	if (retval)
		netdev_warn(usbdev->net, "rndis_reset failed: %d\n", retval);

	/* rndis_reset cleared multicast list, so restore here.
	   (set_multicast_list() also turns on current packet filter) */
	set_multicast_list(usbdev);

	queue_delayed_work(priv->workqueue, &priv->dev_poller_work,
		round_jiffies_relative(DEVICE_POLLER_JIFFIES));

	return deauthenticate(usbdev);
}

/* usbnet .stop callback: disassociate, cancel all deferred work, finish
 * any in-flight scan and block further data packets from the device. */
static int rndis_wlan_stop(struct usbnet *usbdev)
{
	struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
	int retval;
	__le32 filter;

	netdev_dbg(usbdev->net, "%s()\n", __func__);

	retval = disassociate(usbdev, false);

	priv->work_pending = 0;
	cancel_delayed_work_sync(&priv->dev_poller_work);
	cancel_delayed_work_sync(&priv->scan_work);
	cancel_work_sync(&priv->work);
	flush_workqueue(priv->workqueue);

	if (priv->scan_request) {
		cfg80211_scan_done(priv->scan_request, true);
		priv->scan_request = NULL;
	}

	/* Set current packet filter zero to block receiving data packets
	   from device. */
	filter = 0;
	rndis_set_oid(usbdev, RNDIS_OID_GEN_CURRENT_PACKET_FILTER, &filter,
		      sizeof(filter));

	return retval;
}

static const struct driver_info	bcm4320b_info = {
	.description =	"Wireless RNDIS device, BCM4320b based",
	.flags =	FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT |
				FLAG_AVOID_UNLINK_URBS,
	.bind =		rndis_wlan_bind,
	.unbind =	rndis_wlan_unbind,
	.status =	rndis_status,
	.rx_fixup =	rndis_rx_fixup,
	.tx_fixup =	rndis_tx_fixup,
	.reset =	rndis_wlan_reset,
	.stop =		rndis_wlan_stop,
	.early_init =	bcm4320b_early_init,
	.indication =	rndis_wlan_indication,
};

static const struct driver_info	bcm4320a_info = {
	.description =	"Wireless RNDIS device, BCM4320a based",
	.flags =	FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT |
				FLAG_AVOID_UNLINK_URBS,
	.bind =		rndis_wlan_bind,
	.unbind =	rndis_wlan_unbind,
	.status =	rndis_status,
	.rx_fixup =	rndis_rx_fixup,
	.tx_fixup =	rndis_tx_fixup,
	.reset =	rndis_wlan_reset,
	.stop =		rndis_wlan_stop,
	.early_init =	bcm4320a_early_init,
	.indication =	rndis_wlan_indication,
};

static const struct driver_info rndis_wlan_info = {
	.description =	"Wireless RNDIS device",
	.flags =	FLAG_WLAN | FLAG_FRAMING_RN | FLAG_NO_SETINT |
				FLAG_AVOID_UNLINK_URBS,
	.bind =		rndis_wlan_bind,
	.unbind =	rndis_wlan_unbind,
	.status =	rndis_status,
	.rx_fixup =	rndis_rx_fixup,
	.tx_fixup =	rndis_tx_fixup,
	.reset =	rndis_wlan_reset,
	.stop =		rndis_wlan_stop,
	.early_init =	unknown_early_init,
	.indication =	rndis_wlan_indication,
};

/*-------------------------------------------------------------------------*/

static const struct usb_device_id products [] = {
/* Shared interface-class match for the RNDIS control interface. */
#define	RNDIS_MASTER_INTERFACE \
	.bInterfaceClass	= USB_CLASS_COMM, \
	.bInterfaceSubClass	= 2 /* ACM */, \
	.bInterfaceProtocol	= 0x0ff

/* INF driver for these devices have DriverVer >= 4.xx.xx.xx and many custom
 * parameters available. Chipset marked as 'BCM4320SKFBG' in NDISwrapper-wiki.
 */
{
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x0411,
	.idProduct		= 0x00bc,	/* Buffalo WLI-U2-KG125S */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x0baf,
	.idProduct		= 0x011b,	/* U.S. Robotics USR5421 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x050d,
	.idProduct		= 0x011b,	/* Belkin F5D7051 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x1799,	/* Belkin has two vendor ids */
	.idProduct		= 0x011b,	/* Belkin F5D7051 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x13b1,
	.idProduct		= 0x0014,	/* Linksys WUSB54GSv2 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x13b1,
	.idProduct		= 0x0026,	/* Linksys WUSB54GSC */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x0b05,
	.idProduct		= 0x1717,	/* Asus WL169gE */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x0a5c,
	.idProduct		= 0xd11b,	/* Eminent EM4045 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x1690,
	.idProduct		= 0x0715,	/* BT Voyager 1055 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320b_info,
},
/* These devices have DriverVer < 4.xx.xx.xx and do not have any custom
 * parameters available, hardware probably contain older firmware version with
 * no way of updating. Chipset marked as 'BCM4320????' in NDISwrapper-wiki.
 */
{
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x13b1,
	.idProduct		= 0x000e,	/* Linksys WUSB54GSv1 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320a_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x0baf,
	.idProduct		= 0x0111,	/* U.S. Robotics USR5420 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320a_info,
}, {
	.match_flags	=   USB_DEVICE_ID_MATCH_INT_INFO
			  | USB_DEVICE_ID_MATCH_DEVICE,
	.idVendor		= 0x0411,
	.idProduct		= 0x004b,	/* BUFFALO WLI-USB-G54 */
	RNDIS_MASTER_INTERFACE,
	.driver_info		= (unsigned long) &bcm4320a_info,
},
/* Generic Wireless RNDIS devices that we don't have exact
 * idVendor/idProduct/chip yet.
 */
{
	/* RNDIS is MSFT's un-official variant of CDC ACM */
	USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
	.driver_info = (unsigned long) &rndis_wlan_info,
}, {
	/* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
	USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
	.driver_info = (unsigned long) &rndis_wlan_info,
},
	{ },		// END
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver rndis_wlan_driver = {
	.name =		"rndis_wlan",
	.id_table =	products,
	.probe =	usbnet_probe,
	.disconnect =	usbnet_disconnect,
	.suspend =	usbnet_suspend,
	.resume =	usbnet_resume,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(rndis_wlan_driver);

MODULE_AUTHOR("Bjorge Dijkstra");
MODULE_AUTHOR("Jussi Kivilinna");
MODULE_DESCRIPTION("Driver for RNDIS based USB Wireless adapters");
MODULE_LICENSE("GPL");
gpl-2.0
Jackeagle/android_kernel_htc_dlxub1
drivers/md/dm-ioctl.c
736
38205
/* * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm.h" #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/dm-ioctl.h> #include <linux/hdreg.h> #include <linux/compat.h> #include <asm/uaccess.h> #define DM_MSG_PREFIX "ioctl" #define DM_DRIVER_EMAIL "dm-devel@redhat.com" /*----------------------------------------------------------------- * The ioctl interface needs to be able to look up devices by * name or uuid. *---------------------------------------------------------------*/ struct hash_cell { struct list_head name_list; struct list_head uuid_list; char *name; char *uuid; struct mapped_device *md; struct dm_table *new_map; }; struct vers_iter { size_t param_size; struct dm_target_versions *vers, *old_vers; char *end; uint32_t flags; }; #define NUM_BUCKETS 64 #define MASK_BUCKETS (NUM_BUCKETS - 1) static struct list_head _name_buckets[NUM_BUCKETS]; static struct list_head _uuid_buckets[NUM_BUCKETS]; static void dm_hash_remove_all(int keep_open_devices); /* * Guards access to both hash tables. */ static DECLARE_RWSEM(_hash_lock); /* * Protects use of mdptr to obtain hash cell name and uuid from mapped device. */ static DEFINE_MUTEX(dm_hash_cells_mutex); static void init_buckets(struct list_head *buckets) { unsigned int i; for (i = 0; i < NUM_BUCKETS; i++) INIT_LIST_HEAD(buckets + i); } static int dm_hash_init(void) { init_buckets(_name_buckets); init_buckets(_uuid_buckets); return 0; } static void dm_hash_exit(void) { dm_hash_remove_all(0); } /*----------------------------------------------------------------- * Hash function: * We're not really concerned with the str hash function being * fast since it's only used by the ioctl interface. 
*---------------------------------------------------------------*/ static unsigned int hash_str(const char *str) { const unsigned int hash_mult = 2654435387U; unsigned int h = 0; while (*str) h = (h + (unsigned int) *str++) * hash_mult; return h & MASK_BUCKETS; } /*----------------------------------------------------------------- * Code for looking up a device by name *---------------------------------------------------------------*/ static struct hash_cell *__get_name_cell(const char *str) { struct hash_cell *hc; unsigned int h = hash_str(str); list_for_each_entry (hc, _name_buckets + h, name_list) if (!strcmp(hc->name, str)) { dm_get(hc->md); return hc; } return NULL; } static struct hash_cell *__get_uuid_cell(const char *str) { struct hash_cell *hc; unsigned int h = hash_str(str); list_for_each_entry (hc, _uuid_buckets + h, uuid_list) if (!strcmp(hc->uuid, str)) { dm_get(hc->md); return hc; } return NULL; } static struct hash_cell *__get_dev_cell(uint64_t dev) { struct mapped_device *md; struct hash_cell *hc; md = dm_get_md(huge_decode_dev(dev)); if (!md) return NULL; hc = dm_get_mdptr(md); if (!hc) { dm_put(md); return NULL; } return hc; } /*----------------------------------------------------------------- * Inserting, removing and renaming a device. 
*---------------------------------------------------------------*/ static struct hash_cell *alloc_cell(const char *name, const char *uuid, struct mapped_device *md) { struct hash_cell *hc; hc = kmalloc(sizeof(*hc), GFP_KERNEL); if (!hc) return NULL; hc->name = kstrdup(name, GFP_KERNEL); if (!hc->name) { kfree(hc); return NULL; } if (!uuid) hc->uuid = NULL; else { hc->uuid = kstrdup(uuid, GFP_KERNEL); if (!hc->uuid) { kfree(hc->name); kfree(hc); return NULL; } } INIT_LIST_HEAD(&hc->name_list); INIT_LIST_HEAD(&hc->uuid_list); hc->md = md; hc->new_map = NULL; return hc; } static void free_cell(struct hash_cell *hc) { if (hc) { kfree(hc->name); kfree(hc->uuid); kfree(hc); } } /* * The kdev_t and uuid of a device can never change once it is * initially inserted. */ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) { struct hash_cell *cell, *hc; /* * Allocate the new cells. */ cell = alloc_cell(name, uuid, md); if (!cell) return -ENOMEM; /* * Insert the cell into both hash tables. 
*/ down_write(&_hash_lock); hc = __get_name_cell(name); if (hc) { dm_put(hc->md); goto bad; } list_add(&cell->name_list, _name_buckets + hash_str(name)); if (uuid) { hc = __get_uuid_cell(uuid); if (hc) { list_del(&cell->name_list); dm_put(hc->md); goto bad; } list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); } dm_get(md); mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(md, cell); mutex_unlock(&dm_hash_cells_mutex); up_write(&_hash_lock); return 0; bad: up_write(&_hash_lock); free_cell(cell); return -EBUSY; } static void __hash_remove(struct hash_cell *hc) { struct dm_table *table; /* remove from the dev hash */ list_del(&hc->uuid_list); list_del(&hc->name_list); mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(hc->md, NULL); mutex_unlock(&dm_hash_cells_mutex); table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); } if (hc->new_map) dm_table_destroy(hc->new_map); dm_put(hc->md); free_cell(hc); } static void dm_hash_remove_all(int keep_open_devices) { int i, dev_skipped; struct hash_cell *hc; struct mapped_device *md; retry: dev_skipped = 0; down_write(&_hash_lock); for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry(hc, _name_buckets + i, name_list) { md = hc->md; dm_get(md); if (keep_open_devices && dm_lock_for_deletion(md)) { dm_put(md); dev_skipped++; continue; } __hash_remove(hc); up_write(&_hash_lock); dm_put(md); if (likely(keep_open_devices)) dm_destroy(md); else dm_destroy_immediate(md); /* * Some mapped devices may be using other mapped * devices, so repeat until we make no further * progress. If a new mapped device is created * here it will also get removed. */ goto retry; } } up_write(&_hash_lock); if (dev_skipped) DMWARN("remove_all left %d open device(s)", dev_skipped); } /* * Set the uuid of a hash_cell that isn't already set. 
*/ static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid) { mutex_lock(&dm_hash_cells_mutex); hc->uuid = new_uuid; mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid)); } /* * Changes the name of a hash_cell and returns the old name for * the caller to free. */ static char *__change_cell_name(struct hash_cell *hc, char *new_name) { char *old_name; /* * Rename and move the name cell. */ list_del(&hc->name_list); old_name = hc->name; mutex_lock(&dm_hash_cells_mutex); hc->name = new_name; mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->name_list, _name_buckets + hash_str(new_name)); return old_name; } static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, const char *new) { char *new_data, *old_name = NULL; struct hash_cell *hc; struct dm_table *table; struct mapped_device *md; unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; /* * duplicate new. */ new_data = kstrdup(new, GFP_KERNEL); if (!new_data) return ERR_PTR(-ENOMEM); down_write(&_hash_lock); /* * Is new free ? */ if (change_uuid) hc = __get_uuid_cell(new); else hc = __get_name_cell(new); if (hc) { DMWARN("Unable to change %s on mapped device %s to one that " "already exists: %s", change_uuid ? "uuid" : "name", param->name, new); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-EBUSY); } /* * Is there such a device as 'old' ? */ hc = __get_name_cell(param->name); if (!hc) { DMWARN("Unable to rename non-existent device, %s to %s%s", param->name, change_uuid ? "uuid " : "", new); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-ENXIO); } /* * Does this device already have a uuid? 
*/ if (change_uuid && hc->uuid) { DMWARN("Unable to change uuid of mapped device %s to %s " "because uuid is already set to %s", param->name, new, hc->uuid); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-EINVAL); } if (change_uuid) __set_cell_uuid(hc, new_data); else old_name = __change_cell_name(hc, new_data); /* * Wake up any dm event waiters. */ table = dm_get_live_table(hc->md); if (table) { dm_table_event(table); dm_table_put(table); } if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; md = hc->md; up_write(&_hash_lock); kfree(old_name); return md; } /*----------------------------------------------------------------- * Implementation of the ioctl commands *---------------------------------------------------------------*/ /* * All the ioctl commands get dispatched to functions with this * prototype. */ typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size); static int remove_all(struct dm_ioctl *param, size_t param_size) { dm_hash_remove_all(1); param->data_size = 0; return 0; } /* * Round up the ptr to an 8-byte boundary. */ #define ALIGN_MASK 7 static inline void *align_ptr(void *ptr) { return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); } /* * Retrieves the data payload buffer from an already allocated * struct dm_ioctl. */ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, size_t *len) { param->data_start = align_ptr(param + 1) - (void *) param; if (param->data_start < param_size) *len = param_size - param->data_start; else *len = 0; return ((void *) param) + param->data_start; } static int list_devices(struct dm_ioctl *param, size_t param_size) { unsigned int i; struct hash_cell *hc; size_t len, needed = 0; struct gendisk *disk; struct dm_name_list *nl, *old_nl = NULL; down_write(&_hash_lock); /* * Loop through all the devices working out how much * space we need. 
*/ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { needed += sizeof(struct dm_name_list); needed += strlen(hc->name) + 1; needed += ALIGN_MASK; } } /* * Grab our output buffer. */ nl = get_result_buffer(param, param_size, &len); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } param->data_size = param->data_start + needed; nl->dev = 0; /* Flags no data */ /* * Now loop through filling out the names. */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { if (old_nl) old_nl->next = (uint32_t) ((void *) nl - (void *) old_nl); disk = dm_disk(hc->md); nl->dev = huge_encode_dev(disk_devt(disk)); nl->next = 0; strcpy(nl->name, hc->name); old_nl = nl; nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1); } } out: up_write(&_hash_lock); return 0; } static void list_version_get_needed(struct target_type *tt, void *needed_param) { size_t *needed = needed_param; *needed += sizeof(struct dm_target_versions); *needed += strlen(tt->name); *needed += ALIGN_MASK; } static void list_version_get_info(struct target_type *tt, void *param) { struct vers_iter *info = param; /* Check space - it might have changed since the first iteration */ if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > info->end) { info->flags = DM_BUFFER_FULL_FLAG; return; } if (info->old_vers) info->old_vers->next = (uint32_t) ((void *)info->vers - (void *)info->old_vers); info->vers->version[0] = tt->version[0]; info->vers->version[1] = tt->version[1]; info->vers->version[2] = tt->version[2]; info->vers->next = 0; strcpy(info->vers->name, tt->name); info->old_vers = info->vers; info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1); } static int list_versions(struct dm_ioctl *param, size_t param_size) { size_t len, needed = 0; struct dm_target_versions *vers; struct vers_iter iter_info; /* * Loop through all the devices working out how much * space we need. 
*/ dm_target_iterate(list_version_get_needed, &needed); /* * Grab our output buffer. */ vers = get_result_buffer(param, param_size, &len); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } param->data_size = param->data_start + needed; iter_info.param_size = param_size; iter_info.old_vers = NULL; iter_info.vers = vers; iter_info.flags = 0; iter_info.end = (char *)vers+len; /* * Now loop through filling out the names & versions. */ dm_target_iterate(list_version_get_info, &iter_info); param->flags |= iter_info.flags; out: return 0; } static int check_name(const char *name) { if (strchr(name, '/')) { DMWARN("invalid device name"); return -EINVAL; } return 0; } /* * On successful return, the caller must not attempt to acquire * _hash_lock without first calling dm_table_put, because dm_table_destroy * waits for this dm_table_put and could be called under this lock. */ static struct dm_table *dm_get_inactive_table(struct mapped_device *md) { struct hash_cell *hc; struct dm_table *table = NULL; down_read(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { DMWARN("device has been removed from the dev hash table."); goto out; } table = hc->new_map; if (table) dm_table_get(table); out: up_read(&_hash_lock); return table; } static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, struct dm_ioctl *param) { return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? dm_get_inactive_table(md) : dm_get_live_table(md); } /* * Fills in a dm_ioctl structure, ready for sending back to * userland. */ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) { struct gendisk *disk = dm_disk(md); struct dm_table *table; param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | DM_ACTIVE_PRESENT_FLAG); if (dm_suspended_md(md)) param->flags |= DM_SUSPEND_FLAG; param->dev = huge_encode_dev(disk_devt(disk)); /* * Yes, this will be out of date by the time it gets back * to userland, but it is still very useful for * debugging. 
*/ param->open_count = dm_open_count(md); param->event_nr = dm_get_event_nr(md); param->target_count = 0; table = dm_get_live_table(md); if (table) { if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { if (get_disk_ro(disk)) param->flags |= DM_READONLY_FLAG; param->target_count = dm_table_get_num_targets(table); } dm_table_put(table); param->flags |= DM_ACTIVE_PRESENT_FLAG; } if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) { table = dm_get_inactive_table(md); if (table) { if (!(dm_table_get_mode(table) & FMODE_WRITE)) param->flags |= DM_READONLY_FLAG; param->target_count = dm_table_get_num_targets(table); dm_table_put(table); } } } static int dev_create(struct dm_ioctl *param, size_t param_size) { int r, m = DM_ANY_MINOR; struct mapped_device *md; r = check_name(param->name); if (r) return r; if (param->flags & DM_PERSISTENT_DEV_FLAG) m = MINOR(huge_decode_dev(param->dev)); r = dm_create(m, &md); if (r) return r; r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md); if (r) { dm_put(md); dm_destroy(md); return r; } param->flags &= ~DM_INACTIVE_PRESENT_FLAG; __dev_status(md, param); dm_put(md); return 0; } /* * Always use UUID for lookups if it's present, otherwise use name or dev. */ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) { struct hash_cell *hc = NULL; if (*param->uuid) { if (*param->name || param->dev) return NULL; hc = __get_uuid_cell(param->uuid); if (!hc) return NULL; } else if (*param->name) { if (param->dev) return NULL; hc = __get_name_cell(param->name); if (!hc) return NULL; } else if (param->dev) { hc = __get_dev_cell(param->dev); if (!hc) return NULL; } else return NULL; /* * Sneakily write in both the name and the uuid * while we have the cell. 
*/ strlcpy(param->name, hc->name, sizeof(param->name)); if (hc->uuid) strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); else param->uuid[0] = '\0'; if (hc->new_map) param->flags |= DM_INACTIVE_PRESENT_FLAG; else param->flags &= ~DM_INACTIVE_PRESENT_FLAG; return hc; } static struct mapped_device *find_device(struct dm_ioctl *param) { struct hash_cell *hc; struct mapped_device *md = NULL; down_read(&_hash_lock); hc = __find_device_hash_cell(param); if (hc) md = hc->md; up_read(&_hash_lock); return md; } static int dev_remove(struct dm_ioctl *param, size_t param_size) { struct hash_cell *hc; struct mapped_device *md; int r; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } md = hc->md; /* * Ensure the device is not open and nothing further can open it. */ r = dm_lock_for_deletion(md); if (r) { DMDEBUG_LIMIT("unable to remove open device %s", hc->name); up_write(&_hash_lock); dm_put(md); return r; } __hash_remove(hc); up_write(&_hash_lock); if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; dm_put(md); dm_destroy(md); return 0; } /* * Check a string doesn't overrun the chunk of * memory we copied from userland. */ static int invalid_str(char *str, void *end) { while ((void *) str < end) if (!*str++) return 0; return -EINVAL; } static int dev_rename(struct dm_ioctl *param, size_t param_size) { int r; char *new_data = (char *) param + param->data_start; struct mapped_device *md; unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; if (new_data < param->data || invalid_str(new_data, (void *) param + param_size) || strlen(new_data) > (change_uuid ? 
DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { DMWARN("Invalid new mapped device name or uuid string supplied."); return -EINVAL; } if (!change_uuid) { r = check_name(new_data); if (r) return r; } md = dm_hash_rename(param, new_data); if (IS_ERR(md)) return PTR_ERR(md); __dev_status(md, param); dm_put(md); return 0; } static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) { int r = -EINVAL, x; struct mapped_device *md; struct hd_geometry geometry; unsigned long indata[4]; char *geostr = (char *) param + param->data_start; char dummy; md = find_device(param); if (!md) return -ENXIO; if (geostr < param->data || invalid_str(geostr, (void *) param + param_size)) { DMWARN("Invalid geometry supplied."); goto out; } x = sscanf(geostr, "%lu %lu %lu %lu%c", indata, indata + 1, indata + 2, indata + 3, &dummy); if (x != 4) { DMWARN("Unable to interpret geometry settings."); goto out; } if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255 || indata[3] > ULONG_MAX) { DMWARN("Geometry exceeds range limits."); goto out; } geometry.cylinders = indata[0]; geometry.heads = indata[1]; geometry.sectors = indata[2]; geometry.start = indata[3]; r = dm_set_geometry(md, &geometry); param->data_size = 0; out: dm_put(md); return r; } static int do_suspend(struct dm_ioctl *param) { int r = 0; unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) { r = dm_suspend(md, suspend_flags); if (r) goto out; } __dev_status(md, param); out: dm_put(md); return r; } static int do_resume(struct dm_ioctl *param) { int r = 0; unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct hash_cell *hc; struct mapped_device *md; struct dm_table *new_map, *old_map = NULL; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { 
DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } md = hc->md; new_map = hc->new_map; hc->new_map = NULL; param->flags &= ~DM_INACTIVE_PRESENT_FLAG; up_write(&_hash_lock); /* Do we need to load a new map ? */ if (new_map) { /* Suspend if it isn't already suspended */ if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) dm_suspend(md, suspend_flags); old_map = dm_swap_table(md, new_map); if (IS_ERR(old_map)) { dm_table_destroy(new_map); dm_put(md); return PTR_ERR(old_map); } if (dm_table_get_mode(new_map) & FMODE_WRITE) set_disk_ro(dm_disk(md), 0); else set_disk_ro(dm_disk(md), 1); } if (dm_suspended_md(md)) { r = dm_resume(md); if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; } if (old_map) dm_table_destroy(old_map); if (!r) __dev_status(md, param); dm_put(md); return r; } /* * Set or unset the suspension state of a device. * If the device already is in the requested state we just return its status. */ static int dev_suspend(struct dm_ioctl *param, size_t param_size) { if (param->flags & DM_SUSPEND_FLAG) return do_suspend(param); return do_resume(param); } /* * Copies device info back to user space, used by * the create and info ioctls. 
*/ static int dev_status(struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); dm_put(md); return 0; } /* * Build up the status struct for each target */ static void retrieve_status(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { unsigned int i, num_targets; struct dm_target_spec *spec; char *outbuf, *outptr; status_type_t type; size_t remaining, len, used = 0; outptr = outbuf = get_result_buffer(param, param_size, &len); if (param->flags & DM_STATUS_TABLE_FLAG) type = STATUSTYPE_TABLE; else type = STATUSTYPE_INFO; /* Get all the target info */ num_targets = dm_table_get_num_targets(table); for (i = 0; i < num_targets; i++) { struct dm_target *ti = dm_table_get_target(table, i); size_t l; remaining = len - (outptr - outbuf); if (remaining <= sizeof(struct dm_target_spec)) { param->flags |= DM_BUFFER_FULL_FLAG; break; } spec = (struct dm_target_spec *) outptr; spec->status = 0; spec->sector_start = ti->begin; spec->length = ti->len; strncpy(spec->target_type, ti->type->name, sizeof(spec->target_type)); outptr += sizeof(struct dm_target_spec); remaining = len - (outptr - outbuf); if (remaining <= 0) { param->flags |= DM_BUFFER_FULL_FLAG; break; } /* Get the status/table string from the target driver */ if (ti->type->status) ti->type->status(ti, type, outptr, remaining); else outptr[0] = '\0'; l = strlen(outptr) + 1; if (l == remaining) { param->flags |= DM_BUFFER_FULL_FLAG; break; } outptr += l; used = param->data_start + (outptr - outbuf); outptr = align_ptr(outptr); spec->next = outptr - outbuf; } if (used) param->data_size = used; param->target_count = num_targets; } /* * Wait for a device to report an event */ static int dev_wait(struct dm_ioctl *param, size_t param_size) { int r = 0; struct mapped_device *md; struct dm_table *table; md = find_device(param); if (!md) return -ENXIO; /* * Wait for a notification event */ if (dm_wait_event(md, 
param->event_nr)) { r = -ERESTARTSYS; goto out; } /* * The userland program is going to want to know what * changed to trigger the event, so we may as well tell * him and save an ioctl. */ __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); } out: dm_put(md); return r; } static inline fmode_t get_mode(struct dm_ioctl *param) { fmode_t mode = FMODE_READ | FMODE_WRITE; if (param->flags & DM_READONLY_FLAG) mode = FMODE_READ; return mode; } static int next_target(struct dm_target_spec *last, uint32_t next, void *end, struct dm_target_spec **spec, char **target_params) { *spec = (struct dm_target_spec *) ((unsigned char *) last + next); *target_params = (char *) (*spec + 1); if (*spec < (last + 1)) return -EINVAL; return invalid_str(*target_params, end); } static int populate_table(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { int r; unsigned int i = 0; struct dm_target_spec *spec = (struct dm_target_spec *) param; uint32_t next = param->data_start; void *end = (void *) param + param_size; char *target_params; if (!param->target_count) { DMWARN("populate_table: no targets specified"); return -EINVAL; } for (i = 0; i < param->target_count; i++) { r = next_target(spec, next, end, &spec, &target_params); if (r) { DMWARN("unable to find target"); return r; } r = dm_table_add_target(table, spec->target_type, (sector_t) spec->sector_start, (sector_t) spec->length, target_params); if (r) { DMWARN("error adding target to table"); return r; } next = spec->next; } return dm_table_complete(table); } static int table_load(struct dm_ioctl *param, size_t param_size) { int r; struct hash_cell *hc; struct dm_table *t; struct mapped_device *md; struct target_type *immutable_target_type; md = find_device(param); if (!md) return -ENXIO; r = dm_table_create(&t, get_mode(param), param->target_count, md); if (r) goto out; r = populate_table(t, param, param_size); if (r) 
{ dm_table_destroy(t); goto out; } immutable_target_type = dm_get_immutable_target_type(md); if (immutable_target_type && (immutable_target_type != dm_table_get_immutable_target_type(t))) { DMWARN("can't replace immutable target type %s", immutable_target_type->name); dm_table_destroy(t); r = -EINVAL; goto out; } /* Protect md->type and md->queue against concurrent table loads. */ dm_lock_md_type(md); if (dm_get_md_type(md) == DM_TYPE_NONE) /* Initial table load: acquire type of table. */ dm_set_md_type(md, dm_table_get_type(t)); else if (dm_get_md_type(md) != dm_table_get_type(t)) { DMWARN("can't change device type after initial table load."); dm_table_destroy(t); dm_unlock_md_type(md); r = -EINVAL; goto out; } /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md); if (r) { DMWARN("unable to set up device queue for new table."); dm_table_destroy(t); dm_unlock_md_type(md); goto out; } dm_unlock_md_type(md); /* stage inactive table */ down_write(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { DMWARN("device has been removed from the dev hash table."); dm_table_destroy(t); up_write(&_hash_lock); r = -ENXIO; goto out; } if (hc->new_map) dm_table_destroy(hc->new_map); hc->new_map = t; up_write(&_hash_lock); param->flags |= DM_INACTIVE_PRESENT_FLAG; __dev_status(md, param); out: dm_put(md); return r; } static int table_clear(struct dm_ioctl *param, size_t param_size) { struct hash_cell *hc; struct mapped_device *md; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } if (hc->new_map) { dm_table_destroy(hc->new_map); hc->new_map = NULL; } param->flags &= ~DM_INACTIVE_PRESENT_FLAG; __dev_status(hc->md, param); md = hc->md; up_write(&_hash_lock); dm_put(md); return 0; } /* * Retrieves a list of devices used by a particular dm device. 
*/ static void retrieve_deps(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { unsigned int count = 0; struct list_head *tmp; size_t len, needed; struct dm_dev_internal *dd; struct dm_target_deps *deps; deps = get_result_buffer(param, param_size, &len); /* * Count the devices. */ list_for_each (tmp, dm_table_get_devices(table)) count++; /* * Check we have enough space. */ needed = sizeof(*deps) + (sizeof(*deps->dev) * count); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; return; } /* * Fill in the devices. */ deps->count = count; count = 0; list_for_each_entry (dd, dm_table_get_devices(table), list) deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev); param->data_size = param->data_start + needed; } static int table_deps(struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; struct dm_table *table; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_deps(table, param, param_size); dm_table_put(table); } dm_put(md); return 0; } /* * Return the status of a device as a text string for each * target. */ static int table_status(struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; struct dm_table *table; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param); if (table) { retrieve_status(table, param, param_size); dm_table_put(table); } dm_put(md); return 0; } /* * Pass a message to the target that's at the supplied device offset. 
*/ static int target_message(struct dm_ioctl *param, size_t param_size) { int r, argc; char **argv; struct mapped_device *md; struct dm_table *table; struct dm_target *ti; struct dm_target_msg *tmsg = (void *) param + param->data_start; md = find_device(param); if (!md) return -ENXIO; if (tmsg < (struct dm_target_msg *) param->data || invalid_str(tmsg->message, (void *) param + param_size)) { DMWARN("Invalid target message parameters."); r = -EINVAL; goto out; } r = dm_split_args(&argc, &argv, tmsg->message); if (r) { DMWARN("Failed to split target message parameters"); goto out; } if (!argc) { DMWARN("Empty message received."); goto out_argv; } table = dm_get_live_table(md); if (!table) goto out_argv; if (dm_deleting_md(md)) { r = -ENXIO; goto out_table; } ti = dm_table_find_target(table, tmsg->sector); if (!dm_target_is_valid(ti)) { DMWARN("Target message sector outside device."); r = -EINVAL; } else if (ti->type->message) r = ti->type->message(ti, argc, argv); else { DMWARN("Target type does not support messages"); r = -EINVAL; } out_table: dm_table_put(table); out_argv: kfree(argv); out: param->data_size = 0; dm_put(md); return r; } /*----------------------------------------------------------------- * Implementation of open/close/ioctl on the special char * device. 
*---------------------------------------------------------------*/ static ioctl_fn lookup_ioctl(unsigned int cmd) { static struct { int cmd; ioctl_fn fn; } _ioctls[] = { {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */ {DM_REMOVE_ALL_CMD, remove_all}, {DM_LIST_DEVICES_CMD, list_devices}, {DM_DEV_CREATE_CMD, dev_create}, {DM_DEV_REMOVE_CMD, dev_remove}, {DM_DEV_RENAME_CMD, dev_rename}, {DM_DEV_SUSPEND_CMD, dev_suspend}, {DM_DEV_STATUS_CMD, dev_status}, {DM_DEV_WAIT_CMD, dev_wait}, {DM_TABLE_LOAD_CMD, table_load}, {DM_TABLE_CLEAR_CMD, table_clear}, {DM_TABLE_DEPS_CMD, table_deps}, {DM_TABLE_STATUS_CMD, table_status}, {DM_LIST_VERSIONS_CMD, list_versions}, {DM_TARGET_MSG_CMD, target_message}, {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry} }; return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn; } /* * As well as checking the version compatibility this always * copies the kernel interface version out. */ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) { uint32_t version[3]; int r = 0; if (copy_from_user(version, user->version, sizeof(version))) return -EFAULT; if ((DM_VERSION_MAJOR != version[0]) || (DM_VERSION_MINOR < version[1])) { DMWARN("ioctl interface mismatch: " "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, version[0], version[1], version[2], cmd); r = -EINVAL; } /* * Fill in the kernel version. 
*/ version[0] = DM_VERSION_MAJOR; version[1] = DM_VERSION_MINOR; version[2] = DM_VERSION_PATCHLEVEL; if (copy_to_user(user->version, version, sizeof(version))) return -EFAULT; return r; } static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) { struct dm_ioctl tmp, *dmi; int secure_data; if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data))) return -EFAULT; if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data))) return -EINVAL; secure_data = tmp.flags & DM_SECURE_DATA_FLAG; dmi = vmalloc(tmp.data_size); if (!dmi) { if (secure_data && clear_user(user, tmp.data_size)) return -EFAULT; return -ENOMEM; } if (copy_from_user(dmi, user, tmp.data_size)) goto bad; /* * Abort if something changed the ioctl data while it was being copied. */ if (dmi->data_size != tmp.data_size) { DMERR("rejecting ioctl: data size modified while processing parameters"); goto bad; } /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, tmp.data_size)) goto bad; *param = dmi; return 0; bad: if (secure_data) memset(dmi, 0, tmp.data_size); vfree(dmi); return -EFAULT; } static int validate_params(uint cmd, struct dm_ioctl *param) { /* Always clear this flag */ param->flags &= ~DM_BUFFER_FULL_FLAG; param->flags &= ~DM_UEVENT_GENERATED_FLAG; param->flags &= ~DM_SECURE_DATA_FLAG; /* Ignores parameters */ if (cmd == DM_REMOVE_ALL_CMD || cmd == DM_LIST_DEVICES_CMD || cmd == DM_LIST_VERSIONS_CMD) return 0; if ((cmd == DM_DEV_CREATE_CMD)) { if (!*param->name) { DMWARN("name not supplied when creating device"); return -EINVAL; } } else if ((*param->uuid && *param->name)) { DMWARN("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } /* Ensure strings are terminated */ param->name[DM_NAME_LEN - 1] = '\0'; param->uuid[DM_UUID_LEN - 1] = '\0'; return 0; } static int ctl_ioctl(uint command, struct dm_ioctl __user *user) { int r = 0; int wipe_buffer; unsigned int cmd; struct dm_ioctl *uninitialized_var(param); ioctl_fn fn = 
NULL; size_t input_param_size; /* only root can play with this */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (_IOC_TYPE(command) != DM_IOCTL) return -ENOTTY; cmd = _IOC_NR(command); /* * Check the interface version passed in. This also * writes out the kernel's interface version. */ r = check_version(cmd, user); if (r) return r; /* * Nothing more to do for the version command. */ if (cmd == DM_VERSION_CMD) return 0; fn = lookup_ioctl(cmd); if (!fn) { DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); return -ENOTTY; } /* * Trying to avoid low memory issues when a device is * suspended. */ current->flags |= PF_MEMALLOC; /* * Copy the parameters into kernel space. */ r = copy_params(user, &param); current->flags &= ~PF_MEMALLOC; if (r) return r; input_param_size = param->data_size; wipe_buffer = param->flags & DM_SECURE_DATA_FLAG; r = validate_params(cmd, param); if (r) goto out; param->data_size = sizeof(*param); r = fn(param, input_param_size); /* * Copy the results back to userland. */ if (!r && copy_to_user(user, param, param->data_size)) r = -EFAULT; out: if (wipe_buffer) memset(param, 0, input_param_size); vfree(param); return r; } static long dm_ctl_ioctl(struct file *file, uint command, ulong u) { return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u); } #ifdef CONFIG_COMPAT static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u) { return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u)); } #else #define dm_compat_ctl_ioctl NULL #endif static const struct file_operations _ctl_fops = { .open = nonseekable_open, .unlocked_ioctl = dm_ctl_ioctl, .compat_ioctl = dm_compat_ctl_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice _dm_misc = { .minor = MAPPER_CTRL_MINOR, .name = DM_NAME, .nodename = DM_DIR "/" DM_CONTROL_NODE, .fops = &_ctl_fops }; MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR); MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE); /* * Create misc character device and link to DM_DIR/control. 
*/ int __init dm_interface_init(void) { int r; r = dm_hash_init(); if (r) return r; r = misc_register(&_dm_misc); if (r) { DMERR("misc_register failed for control device"); dm_hash_exit(); return r; } DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, DM_DRIVER_EMAIL); return 0; } void dm_interface_exit(void) { if (misc_deregister(&_dm_misc) < 0) DMERR("misc_deregister failed for control device"); dm_hash_exit(); } /** * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers * @md: Pointer to mapped_device * @name: Buffer (size DM_NAME_LEN) for name * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined */ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) { int r = 0; struct hash_cell *hc; if (!md) return -ENXIO; mutex_lock(&dm_hash_cells_mutex); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { r = -ENXIO; goto out; } if (name) strcpy(name, hc->name); if (uuid) strcpy(uuid, hc->uuid ? : ""); out: mutex_unlock(&dm_hash_cells_mutex); return r; }
gpl-2.0
zachariasmaladroit/android_kernel_samsung_aries
arch/sparc/mm/io-unit.c
1504
7389
/* * io-unit.c: IO-UNIT specific routines for memory management. * * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/highmem.h> /* pte_offset_map => kmap_atomic */ #include <linux/bitops.h> #include <linux/scatterlist.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/io-unit.h> #include <asm/mxcc.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/dma.h> #include <asm/oplib.h> /* #define IOUNIT_DEBUG */ #ifdef IOUNIT_DEBUG #define IOD(x) printk(x) #else #define IOD(x) do { } while (0) #endif #define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID) #define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM) static void __init iounit_iommu_init(struct of_device *op) { struct iounit_struct *iounit; iopte_t *xpt, *xptend; iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC); if (!iounit) { prom_printf("SUN4D: Cannot alloc iounit, halting.\n"); prom_halt(); } iounit->limit[0] = IOUNIT_BMAP1_START; iounit->limit[1] = IOUNIT_BMAP2_START; iounit->limit[2] = IOUNIT_BMAPM_START; iounit->limit[3] = IOUNIT_BMAPM_END; iounit->rotor[1] = IOUNIT_BMAP2_START; iounit->rotor[2] = IOUNIT_BMAPM_START; xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT"); if (!xpt) { prom_printf("SUN4D: Cannot map External Page Table."); prom_halt(); } op->dev.archdata.iommu = iounit; iounit->page_table = xpt; spin_lock_init(&iounit->lock); for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); xpt < xptend;) iopte_val(*xpt++) = 0; } static int __init iounit_init(void) { extern void sun4d_init_sbi_irq(void); struct device_node *dp; for_each_node_by_name(dp, "sbi") { struct of_device *op = of_find_device_by_node(dp); iounit_iommu_init(op); of_propagate_archdata(op); } sun4d_init_sbi_irq(); return 0; } 
subsys_initcall(iounit_init); /* One has to hold iounit->lock to call this */ static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size) { int i, j, k, npages; unsigned long rotor, scan, limit; iopte_t iopte; npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; /* A tiny bit of magic ingredience :) */ switch (npages) { case 1: i = 0x0231; break; case 2: i = 0x0132; break; default: i = 0x0213; break; } IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); next: j = (i & 15); rotor = iounit->rotor[j - 1]; limit = iounit->limit[j]; scan = rotor; nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); if (scan + npages > limit) { if (limit != rotor) { limit = rotor; scan = iounit->limit[j - 1]; goto nexti; } i >>= 4; if (!(i & 15)) panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size); goto next; } for (k = 1, scan++; k < npages; k++) if (test_bit(scan++, iounit->bmap)) goto nexti; iounit->rotor[j - 1] = (scan < limit) ? 
scan : iounit->limit[j - 1]; scan -= npages; iopte = MKIOPTE(__pa(vaddr & PAGE_MASK)); vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK); for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { set_bit(scan, iounit->bmap); iounit->page_table[scan] = iopte; } IOD(("%08lx\n", vaddr)); return vaddr; } static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long ret, flags; spin_lock_irqsave(&iounit->lock, flags); ret = iounit_get_area(iounit, (unsigned long)vaddr, len); spin_unlock_irqrestore(&iounit->lock, flags); return ret; } static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long flags; /* FIXME: Cache some resolved pages - often several sg entries are to the same page */ spin_lock_irqsave(&iounit->lock, flags); while (sz != 0) { --sz; sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length); sg->dma_length = sg->length; sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); } static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long flags; spin_lock_irqsave(&iounit->lock, flags); len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT; vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); spin_unlock_irqrestore(&iounit->lock, flags); } static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long flags; unsigned long vaddr, len; spin_lock_irqsave(&iounit->lock, flags); while (sz != 0) { --sz; len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> 
PAGE_SHIFT; vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); sg = sg_next(sg); } spin_unlock_irqrestore(&iounit->lock, flags); } #ifdef CONFIG_SBUS static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long page, end; pgprot_t dvma_prot; iopte_t *iopte; *pba = addr; dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); end = PAGE_ALIGN((addr + len)); while(addr < end) { page = va; { pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; long i; pgdp = pgd_offset(&init_mm, addr); pmdp = pmd_offset(pgdp, addr); ptep = pte_offset_map(pmdp, addr); set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); iopte = (iopte_t *)(iounit->page_table + i); *iopte = MKIOPTE(__pa(page)); } addr += PAGE_SIZE; va += PAGE_SIZE; } flush_cache_all(); flush_tlb_all(); return 0; } static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len) { /* XXX Somebody please fill this in */ } #endif static char *iounit_lockarea(char *vaddr, unsigned long len) { /* FIXME: Write this */ return vaddr; } static void iounit_unlockarea(char *vaddr, unsigned long len) { /* FIXME: Write this */ } void __init ld_mmu_iounit(void) { BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0); BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM); #ifdef CONFIG_SBUS BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM); 
BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM); #endif }
gpl-2.0
ronasimi/LGF180-Optimus-G-_Android_KK_v30a_Kernel
drivers/media/video/msm/s5k4e1_reg.c
1760
4574
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "s5k4e1.h" struct s5k4e1_i2c_reg_conf s5k4e1_mipi_settings[] = { {0x30BD, 0x00},/* SEL_CCP[0] */ {0x3084, 0x15},/* SYNC Mode */ {0x30BE, 0x1A},/* M_PCLKDIV_AUTO[4], M_DIV_PCLK[3:0] */ {0x30C1, 0x01},/* pack video enable [0] */ {0x30EE, 0x02},/* DPHY enable [ 1] */ {0x3111, 0x86},/* Embedded data off [5] */ }; /* PLL Configuration */ struct s5k4e1_i2c_reg_conf s5k4e1_pll_preview_settings[] = { {0x0305, 0x04}, {0x0306, 0x00}, {0x0307, 0x44}, {0x30B5, 0x00}, {0x30E2, 0x01},/* num lanes[1:0] = 2 */ {0x30F1, 0xB0}, }; struct s5k4e1_i2c_reg_conf s5k4e1_pll_snap_settings[] = { {0x0305, 0x04}, {0x0306, 0x00}, {0x0307, 0x44}, {0x30B5, 0x00}, {0x30E2, 0x01},/* num lanes[1:0] = 2 */ {0x30F1, 0xB0}, }; struct s5k4e1_i2c_reg_conf s5k4e1_prev_settings[] = { /* output size (1304 x 980) */ {0x30A9, 0x02},/* Horizontal Binning On */ {0x300E, 0xEB},/* Vertical Binning On */ {0x0387, 0x03},/* y_odd_inc 03(10b AVG) */ {0x0344, 0x00},/* x_addr_start 0 */ {0x0345, 0x00}, {0x0348, 0x0A},/* x_addr_end 2607 */ {0x0349, 0x2F}, {0x0346, 0x00},/* y_addr_start 0 */ {0x0347, 0x00}, {0x034A, 0x07},/* y_addr_end 1959 */ {0x034B, 0xA7}, {0x0380, 0x00},/* x_even_inc 1 */ {0x0381, 0x01}, {0x0382, 0x00},/* x_odd_inc 1 */ {0x0383, 0x01}, {0x0384, 0x00},/* y_even_inc 1 */ {0x0385, 0x01}, {0x0386, 0x00},/* y_odd_inc 3 */ {0x0387, 0x03}, {0x034C, 0x05},/* x_output_size 1304 */ {0x034D, 0x18}, {0x034E, 0x03},/* y_output_size 980 */ {0x034F, 0xd4}, {0x30BF, 0xAB},/* outif_enable[7], 
data_type[5:0](2Bh = bayer 10bit} */ {0x30C0, 0xA0},/* video_offset[7:4] 3260%12 */ {0x30C8, 0x06},/* video_data_length 1600 = 1304 * 1.25 */ {0x30C9, 0x5E}, /* Timing Configuration */ {0x0202, 0x03}, {0x0203, 0x14}, {0x0204, 0x00}, {0x0205, 0x80}, {0x0340, 0x03},/* Frame Length */ {0x0341, 0xE0}, {0x0342, 0x0A},/* 2738 Line Length */ {0x0343, 0xB2}, }; struct s5k4e1_i2c_reg_conf s5k4e1_snap_settings[] = { /*Output Size (2608x1960)*/ {0x30A9, 0x03},/* Horizontal Binning Off */ {0x300E, 0xE8},/* Vertical Binning Off */ {0x0387, 0x01},/* y_odd_inc */ {0x034C, 0x0A},/* x_output size */ {0x034D, 0x30}, {0x034E, 0x07},/* y_output size */ {0x034F, 0xA8}, {0x30BF, 0xAB},/* outif_enable[7], data_type[5:0](2Bh = bayer 10bit} */ {0x30C0, 0x80},/* video_offset[7:4] 3260%12 */ {0x30C8, 0x0C},/* video_data_length 3260 = 2608 * 1.25 */ {0x30C9, 0xBC}, /*Timing configuration*/ {0x0202, 0x06}, {0x0203, 0x28}, {0x0204, 0x00}, {0x0205, 0x80}, {0x0340, 0x07},/* Frame Length */ {0x0341, 0xB4}, {0x0342, 0x0A},/* 2738 Line Length */ {0x0343, 0xB2}, }; struct s5k4e1_i2c_reg_conf s5k4e1_recommend_settings[] = { /*CDS timing setting ... */ {0x3000, 0x05}, {0x3001, 0x03}, {0x3002, 0x08}, {0x3003, 0x0A}, {0x3004, 0x50}, {0x3005, 0x0E}, {0x3006, 0x5E}, {0x3007, 0x00}, {0x3008, 0x78}, {0x3009, 0x78}, {0x300A, 0x50}, {0x300B, 0x08}, {0x300C, 0x14}, {0x300D, 0x00}, {0x300E, 0xE8}, {0x300F, 0x82}, {0x301B, 0x77}, /* CDS option setting ... */ {0x3010, 0x00}, {0x3011, 0x3A}, {0x3029, 0x04}, {0x3012, 0x30}, {0x3013, 0xA0}, {0x3014, 0x00}, {0x3015, 0x00}, {0x3016, 0x30}, {0x3017, 0x94}, {0x3018, 0x70}, {0x301D, 0xD4}, {0x3021, 0x02}, {0x3022, 0x24}, {0x3024, 0x40}, {0x3027, 0x08}, /* Pixel option setting ... 
*/ {0x301C, 0x04}, {0x30D8, 0x3F}, {0x302B, 0x01}, {0x3070, 0x5F}, {0x3071, 0x00}, {0x3080, 0x04}, {0x3081, 0x38}, }; struct s5k4e1_reg s5k4e1_regs = { .reg_mipi = &s5k4e1_mipi_settings[0], .reg_mipi_size = ARRAY_SIZE(s5k4e1_mipi_settings), .rec_settings = &s5k4e1_recommend_settings[0], .rec_size = ARRAY_SIZE(s5k4e1_recommend_settings), .reg_pll_p = &s5k4e1_pll_preview_settings[0], .reg_pll_p_size = ARRAY_SIZE(s5k4e1_pll_preview_settings), .reg_pll_s = &s5k4e1_pll_snap_settings[0], .reg_pll_s_size = ARRAY_SIZE(s5k4e1_pll_snap_settings), .reg_prev = &s5k4e1_prev_settings[0], .reg_prev_size = ARRAY_SIZE(s5k4e1_prev_settings), .reg_snap = &s5k4e1_snap_settings[0], .reg_snap_size = ARRAY_SIZE(s5k4e1_snap_settings), };
gpl-2.0
GalaxyTab4/android_kernel_samsung_matissevewifi
drivers/isdn/mISDN/dsp_core.c
2272
33946
/* * Author Andreas Eversberg (jolly@eversberg.eu) * Based on source code structure by * Karsten Keil (keil@isdn4linux.de) * * This file is (c) under GNU PUBLIC LICENSE * For changes and modifications please read * ../../../Documentation/isdn/mISDN.cert * * Thanks to Karsten Keil (great drivers) * Cologne Chip (great chips) * * This module does: * Real-time tone generation * DTMF detection * Real-time cross-connection and conferrence * Compensate jitter due to system load and hardware fault. * All features are done in kernel space and will be realized * using hardware, if available and supported by chip set. * Blowfish encryption/decryption */ /* STRUCTURE: * * The dsp module provides layer 2 for b-channels (64kbit). It provides * transparent audio forwarding with special digital signal processing: * * - (1) generation of tones * - (2) detection of dtmf tones * - (3) crossconnecting and conferences (clocking) * - (4) echo generation for delay test * - (5) volume control * - (6) disable receive data * - (7) pipeline * - (8) encryption/decryption * * Look: * TX RX * ------upper layer------ * | ^ * | |(6) * v | * +-----+-------------+-----+ * |(3)(4) | * | CMX | * | | * | +-------------+ * | | ^ * | | | * |+---------+| +----+----+ * ||(1) || |(2) | * || || | | * || Tones || | DTMF | * || || | | * || || | | * |+----+----+| +----+----+ * +-----+-----+ ^ * | | * v | * +----+----+ +----+----+ * |(5) | |(5) | * | | | | * |TX Volume| |RX Volume| * | | | | * | | | | * +----+----+ +----+----+ * | ^ * | | * v | * +----+-------------+----+ * |(7) | * | | * | Pipeline Processing | * | | * | | * +----+-------------+----+ * | ^ * | | * v | * +----+----+ +----+----+ * |(8) | |(8) | * | | | | * | Encrypt | | Decrypt | * | | | | * | | | | * +----+----+ +----+----+ * | ^ * | | * v | * ------card layer------ * TX RX * * Above you can see the logical data flow. If software is used to do the * process, it is actually the real data flow. 
If hardware is used, data * may not flow, but hardware commands to the card, to provide the data flow * as shown. * * NOTE: The channel must be activated in order to make dsp work, even if * no data flow to the upper layer is intended. Activation can be done * after and before controlling the setting using PH_CONTROL requests. * * DTMF: Will be detected by hardware if possible. It is done before CMX * processing. * * Tones: Will be generated via software if endless looped audio fifos are * not supported by hardware. Tones will override all data from CMX. * It is not required to join a conference to use tones at any time. * * CMX: Is transparent when not used. When it is used, it will do * crossconnections and conferences via software if not possible through * hardware. If hardware capability is available, hardware is used. * * Echo: Is generated by CMX and is used to check performance of hard and * software CMX. * * The CMX has special functions for conferences with one, two and more * members. It will allow different types of data flow. Receive and transmit * data to/form upper layer may be swithed on/off individually without losing * features of CMX, Tones and DTMF. * * Echo Cancellation: Sometimes we like to cancel echo from the interface. * Note that a VoIP call may not have echo caused by the IP phone. The echo * is generated by the telephone line connected to it. Because the delay * is high, it becomes an echo. RESULT: Echo Cachelation is required if * both echo AND delay is applied to an interface. * Remember that software CMX always generates a more or less delay. * * If all used features can be realized in hardware, and if transmit and/or * receive data ist disabled, the card may not send/receive any data at all. * Not receiving is useful if only announcements are played. Not sending is * useful if an answering machine records audio. Not sending and receiving is * useful during most states of the call. 
If supported by hardware, tones * will be played without cpu load. Small PBXs and NT-Mode applications will * not need expensive hardware when processing calls. * * * LOCKING: * * When data is received from upper or lower layer (card), the complete dsp * module is locked by a global lock. This lock MUST lock irq, because it * must lock timer events by DSP poll timer. * When data is ready to be transmitted down, the data is queued and sent * outside lock and timer event. * PH_CONTROL must not change any settings, join or split conference members * during process of data. * * HDLC: * * It works quite the same as transparent, except that HDLC data is forwarded * to all other conference members if no hardware bridging is possible. * Send data will be writte to sendq. Sendq will be sent if confirm is received. * Conference cannot join, if one member is not hdlc. * */ #include <linux/delay.h> #include <linux/gfp.h> #include <linux/mISDNif.h> #include <linux/mISDNdsp.h> #include <linux/module.h> #include <linux/vmalloc.h> #include "core.h" #include "dsp.h" static const char *mISDN_dsp_revision = "2.0"; static int debug; static int options; static int poll; static int dtmfthreshold = 100; MODULE_AUTHOR("Andreas Eversberg"); module_param(debug, uint, S_IRUGO | S_IWUSR); module_param(options, uint, S_IRUGO | S_IWUSR); module_param(poll, uint, S_IRUGO | S_IWUSR); module_param(dtmfthreshold, uint, S_IRUGO | S_IWUSR); MODULE_LICENSE("GPL"); /*int spinnest = 0;*/ spinlock_t dsp_lock; /* global dsp lock */ struct list_head dsp_ilist; struct list_head conf_ilist; int dsp_debug; int dsp_options; int dsp_poll, dsp_tics; /* check if rx may be turned off or must be turned on */ static void dsp_rx_off_member(struct dsp *dsp) { struct mISDN_ctrl_req cq; int rx_off = 1; memset(&cq, 0, sizeof(cq)); if (!dsp->features_rx_off) return; /* not disabled */ if (!dsp->rx_disabled) rx_off = 0; /* software dtmf */ else if (dsp->dtmf.software) rx_off = 0; /* echo in software */ else if 
(dsp->echo.software) rx_off = 0; /* bridge in software */ else if (dsp->conf && dsp->conf->software) rx_off = 0; /* data is not required by user space and not required * for echo dtmf detection, soft-echo, soft-bridging */ if (rx_off == dsp->rx_is_off) return; if (!dsp->ch.peer) { if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: no peer, no rx_off\n", __func__); return; } cq.op = MISDN_CTRL_RX_OFF; cq.p1 = rx_off; if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n", __func__); return; } dsp->rx_is_off = rx_off; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: %s set rx_off = %d\n", __func__, dsp->name, rx_off); } static void dsp_rx_off(struct dsp *dsp) { struct dsp_conf_member *member; if (dsp_options & DSP_OPT_NOHARDWARE) return; /* no conf */ if (!dsp->conf) { dsp_rx_off_member(dsp); return; } /* check all members in conf */ list_for_each_entry(member, &dsp->conf->mlist, list) { dsp_rx_off_member(member->dsp); } } /* enable "fill empty" feature */ static void dsp_fill_empty(struct dsp *dsp) { struct mISDN_ctrl_req cq; memset(&cq, 0, sizeof(cq)); if (!dsp->ch.peer) { if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: no peer, no fill_empty\n", __func__); return; } cq.op = MISDN_CTRL_FILL_EMPTY; cq.p1 = 1; cq.p2 = dsp_silence; if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n", __func__); return; } if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: %s set fill_empty = 1\n", __func__, dsp->name); } static int dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) { struct sk_buff *nskb; int ret = 0; int cont; u8 *data; int len; if (skb->len < sizeof(int)) printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); cont = *((int *)skb->data); len = skb->len - sizeof(int); data = skb->data + sizeof(int); switch (cont) { case DTMF_TONE_START: /* turn on DTMF */ if (dsp->hdlc) { ret = -EINVAL; break; } 
if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: start dtmf\n", __func__); if (len == sizeof(int)) { if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_NOTICE "changing DTMF Threshold " "to %d\n", *((int *)data)); dsp->dtmf.treshold = (*(int *)data) * 10000; } dsp->dtmf.enable = 1; /* init goertzel */ dsp_dtmf_goertzel_init(dsp); /* check dtmf hardware */ dsp_dtmf_hardware(dsp); dsp_rx_off(dsp); break; case DTMF_TONE_STOP: /* turn off DTMF */ if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: stop dtmf\n", __func__); dsp->dtmf.enable = 0; dsp->dtmf.hardware = 0; dsp->dtmf.software = 0; break; case DSP_CONF_JOIN: /* join / update conference */ if (len < sizeof(int)) { ret = -EINVAL; break; } if (*((u32 *)data) == 0) goto conf_split; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: join conference %d\n", __func__, *((u32 *)data)); ret = dsp_cmx_conf(dsp, *((u32 *)data)); /* dsp_cmx_hardware will also be called here */ dsp_rx_off(dsp); if (dsp_debug & DEBUG_DSP_CMX) dsp_cmx_debug(dsp); break; case DSP_CONF_SPLIT: /* remove from conference */ conf_split: if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: release conference\n", __func__); ret = dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be called here */ if (dsp_debug & DEBUG_DSP_CMX) dsp_cmx_debug(dsp); dsp_rx_off(dsp); break; case DSP_TONE_PATT_ON: /* play tone */ if (dsp->hdlc) { ret = -EINVAL; break; } if (len < sizeof(int)) { ret = -EINVAL; break; } if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: turn tone 0x%x on\n", __func__, *((int *)skb->data)); ret = dsp_tone(dsp, *((int *)data)); if (!ret) { dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); } if (!dsp->tone.tone) goto tone_off; break; case DSP_TONE_PATT_OFF: /* stop tone */ if (dsp->hdlc) { ret = -EINVAL; break; } if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: turn tone off\n", __func__); dsp_tone(dsp, 0); dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); /* reset tx buffers (user space data) */ tone_off: dsp->rx_W = 0; 
dsp->rx_R = 0; break; case DSP_VOL_CHANGE_TX: /* change volume */ if (dsp->hdlc) { ret = -EINVAL; break; } if (len < sizeof(int)) { ret = -EINVAL; break; } dsp->tx_volume = *((int *)data); if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: change tx vol to %d\n", __func__, dsp->tx_volume); dsp_cmx_hardware(dsp->conf, dsp); dsp_dtmf_hardware(dsp); dsp_rx_off(dsp); break; case DSP_VOL_CHANGE_RX: /* change volume */ if (dsp->hdlc) { ret = -EINVAL; break; } if (len < sizeof(int)) { ret = -EINVAL; break; } dsp->rx_volume = *((int *)data); if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: change rx vol to %d\n", __func__, dsp->tx_volume); dsp_cmx_hardware(dsp->conf, dsp); dsp_dtmf_hardware(dsp); dsp_rx_off(dsp); break; case DSP_ECHO_ON: /* enable echo */ dsp->echo.software = 1; /* soft echo */ if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: enable cmx-echo\n", __func__); dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); if (dsp_debug & DEBUG_DSP_CMX) dsp_cmx_debug(dsp); break; case DSP_ECHO_OFF: /* disable echo */ dsp->echo.software = 0; dsp->echo.hardware = 0; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: disable cmx-echo\n", __func__); dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); if (dsp_debug & DEBUG_DSP_CMX) dsp_cmx_debug(dsp); break; case DSP_RECEIVE_ON: /* enable receive to user space */ if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: enable receive to user " "space\n", __func__); dsp->rx_disabled = 0; dsp_rx_off(dsp); break; case DSP_RECEIVE_OFF: /* disable receive to user space */ if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: disable receive to " "user space\n", __func__); dsp->rx_disabled = 1; dsp_rx_off(dsp); break; case DSP_MIX_ON: /* enable mixing of tx data */ if (dsp->hdlc) { ret = -EINVAL; break; } if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: enable mixing of " "tx-data with conf mebers\n", __func__); dsp->tx_mix = 1; dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); if (dsp_debug & DEBUG_DSP_CMX) 
dsp_cmx_debug(dsp); break; case DSP_MIX_OFF: /* disable mixing of tx data */ if (dsp->hdlc) { ret = -EINVAL; break; } if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: disable mixing of " "tx-data with conf mebers\n", __func__); dsp->tx_mix = 0; dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); if (dsp_debug & DEBUG_DSP_CMX) dsp_cmx_debug(dsp); break; case DSP_TXDATA_ON: /* enable txdata */ dsp->tx_data = 1; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: enable tx-data\n", __func__); dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); if (dsp_debug & DEBUG_DSP_CMX) dsp_cmx_debug(dsp); break; case DSP_TXDATA_OFF: /* disable txdata */ dsp->tx_data = 0; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: disable tx-data\n", __func__); dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); if (dsp_debug & DEBUG_DSP_CMX) dsp_cmx_debug(dsp); break; case DSP_DELAY: /* use delay algorithm instead of dynamic jitter algorithm */ if (dsp->hdlc) { ret = -EINVAL; break; } if (len < sizeof(int)) { ret = -EINVAL; break; } dsp->cmx_delay = (*((int *)data)) << 3; /* milliseconds to samples */ if (dsp->cmx_delay >= (CMX_BUFF_HALF >> 1)) /* clip to half of maximum usable buffer (half of half buffer) */ dsp->cmx_delay = (CMX_BUFF_HALF >> 1) - 1; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: use delay algorithm to " "compensate jitter (%d samples)\n", __func__, dsp->cmx_delay); break; case DSP_JITTER: /* use dynamic jitter algorithm instead of delay algorithm */ if (dsp->hdlc) { ret = -EINVAL; break; } dsp->cmx_delay = 0; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: use jitter algorithm to " "compensate jitter\n", __func__); break; case DSP_TX_DEJITTER: /* use dynamic jitter algorithm for tx-buffer */ if (dsp->hdlc) { ret = -EINVAL; break; } dsp->tx_dejitter = 1; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: use dejitter on TX " "buffer\n", __func__); break; case DSP_TX_DEJ_OFF: /* use tx-buffer without dejittering*/ if (dsp->hdlc) { ret = 
-EINVAL; break; } dsp->tx_dejitter = 0; if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: use TX buffer without " "dejittering\n", __func__); break; case DSP_PIPELINE_CFG: if (dsp->hdlc) { ret = -EINVAL; break; } if (len > 0 && ((char *)data)[len - 1]) { printk(KERN_DEBUG "%s: pipeline config string " "is not NULL terminated!\n", __func__); ret = -EINVAL; } else { dsp->pipeline.inuse = 1; dsp_cmx_hardware(dsp->conf, dsp); ret = dsp_pipeline_build(&dsp->pipeline, len > 0 ? data : NULL); dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); } break; case DSP_BF_ENABLE_KEY: /* turn blowfish on */ if (dsp->hdlc) { ret = -EINVAL; break; } if (len < 4 || len > 56) { ret = -EINVAL; break; } if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: turn blowfish on (key " "not shown)\n", __func__); ret = dsp_bf_init(dsp, (u8 *)data, len); /* set new cont */ if (!ret) cont = DSP_BF_ACCEPT; else cont = DSP_BF_REJECT; /* send indication if it worked to set it */ nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY, sizeof(int), &cont, GFP_ATOMIC); if (nskb) { if (dsp->up) { if (dsp->up->send(dsp->up, nskb)) dev_kfree_skb(nskb); } else dev_kfree_skb(nskb); } if (!ret) { dsp_cmx_hardware(dsp->conf, dsp); dsp_dtmf_hardware(dsp); dsp_rx_off(dsp); } break; case DSP_BF_DISABLE: /* turn blowfish off */ if (dsp->hdlc) { ret = -EINVAL; break; } if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: turn blowfish off\n", __func__); dsp_bf_cleanup(dsp); dsp_cmx_hardware(dsp->conf, dsp); dsp_dtmf_hardware(dsp); dsp_rx_off(dsp); break; default: if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: ctrl req %x unhandled\n", __func__, cont); ret = -EINVAL; } return ret; } static void get_features(struct mISDNchannel *ch) { struct dsp *dsp = container_of(ch, struct dsp, ch); struct mISDN_ctrl_req cq; if (!ch->peer) { if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: no peer, no features\n", __func__); return; } memset(&cq, 0, sizeof(cq)); cq.op = MISDN_CTRL_GETOP; if 
(ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq) < 0) { printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n", __func__); return; } if (cq.op & MISDN_CTRL_RX_OFF) dsp->features_rx_off = 1; if (cq.op & MISDN_CTRL_FILL_EMPTY) dsp->features_fill_empty = 1; if (dsp_options & DSP_OPT_NOHARDWARE) return; if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) { cq.op = MISDN_CTRL_HW_FEATURES; *((u_long *)&cq.p1) = (u_long)&dsp->features; if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq)) { printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n", __func__); } } else if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: features not supported for %s\n", __func__, dsp->name); } static int dsp_function(struct mISDNchannel *ch, struct sk_buff *skb) { struct dsp *dsp = container_of(ch, struct dsp, ch); struct mISDNhead *hh; int ret = 0; u8 *digits = NULL; u_long flags; hh = mISDN_HEAD_P(skb); switch (hh->prim) { /* FROM DOWN */ case (PH_DATA_CNF): dsp->data_pending = 0; /* trigger next hdlc frame, if any */ if (dsp->hdlc) { spin_lock_irqsave(&dsp_lock, flags); if (dsp->b_active) schedule_work(&dsp->workq); spin_unlock_irqrestore(&dsp_lock, flags); } break; case (PH_DATA_IND): case (DL_DATA_IND): if (skb->len < 1) { ret = -EINVAL; break; } if (dsp->rx_is_off) { if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: rx-data during rx_off" " for %s\n", __func__, dsp->name); } if (dsp->hdlc) { /* hdlc */ spin_lock_irqsave(&dsp_lock, flags); dsp_cmx_hdlc(dsp, skb); spin_unlock_irqrestore(&dsp_lock, flags); if (dsp->rx_disabled) { /* if receive is not allowed */ break; } hh->prim = DL_DATA_IND; if (dsp->up) return dsp->up->send(dsp->up, skb); break; } spin_lock_irqsave(&dsp_lock, flags); /* decrypt if enabled */ if (dsp->bf_enable) dsp_bf_decrypt(dsp, skb->data, skb->len); /* pipeline */ if (dsp->pipeline.inuse) dsp_pipeline_process_rx(&dsp->pipeline, skb->data, skb->len, hh->id); /* change volume if requested */ if (dsp->rx_volume) dsp_change_volume(skb, dsp->rx_volume); /* check if dtmf soft decoding 
is turned on */ if (dsp->dtmf.software) { digits = dsp_dtmf_goertzel_decode(dsp, skb->data, skb->len, (dsp_options & DSP_OPT_ULAW) ? 1 : 0); } /* we need to process receive data if software */ if (dsp->conf && dsp->conf->software) { /* process data from card at cmx */ dsp_cmx_receive(dsp, skb); } spin_unlock_irqrestore(&dsp_lock, flags); /* send dtmf result, if any */ if (digits) { while (*digits) { int k; struct sk_buff *nskb; if (dsp_debug & DEBUG_DSP_DTMF) printk(KERN_DEBUG "%s: digit" "(%c) to layer %s\n", __func__, *digits, dsp->name); k = *digits | DTMF_TONE_VAL; nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY, sizeof(int), &k, GFP_ATOMIC); if (nskb) { if (dsp->up) { if (dsp->up->send( dsp->up, nskb)) dev_kfree_skb(nskb); } else dev_kfree_skb(nskb); } digits++; } } if (dsp->rx_disabled) { /* if receive is not allowed */ break; } hh->prim = DL_DATA_IND; if (dsp->up) return dsp->up->send(dsp->up, skb); break; case (PH_CONTROL_IND): if (dsp_debug & DEBUG_DSP_DTMFCOEFF) printk(KERN_DEBUG "%s: PH_CONTROL INDICATION " "received: %x (len %d) %s\n", __func__, hh->id, skb->len, dsp->name); switch (hh->id) { case (DTMF_HFC_COEF): /* getting coefficients */ if (!dsp->dtmf.hardware) { if (dsp_debug & DEBUG_DSP_DTMFCOEFF) printk(KERN_DEBUG "%s: ignoring DTMF " "coefficients from HFC\n", __func__); break; } digits = dsp_dtmf_goertzel_decode(dsp, skb->data, skb->len, 2); while (*digits) { int k; struct sk_buff *nskb; if (dsp_debug & DEBUG_DSP_DTMF) printk(KERN_DEBUG "%s: digit" "(%c) to layer %s\n", __func__, *digits, dsp->name); k = *digits | DTMF_TONE_VAL; nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY, sizeof(int), &k, GFP_ATOMIC); if (nskb) { if (dsp->up) { if (dsp->up->send( dsp->up, nskb)) dev_kfree_skb(nskb); } else dev_kfree_skb(nskb); } digits++; } break; case (HFC_VOL_CHANGE_TX): /* change volume */ if (skb->len != sizeof(int)) { ret = -EINVAL; break; } spin_lock_irqsave(&dsp_lock, flags); dsp->tx_volume = *((int *)skb->data); if (dsp_debug & 
DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: change tx volume to " "%d\n", __func__, dsp->tx_volume); dsp_cmx_hardware(dsp->conf, dsp); dsp_dtmf_hardware(dsp); dsp_rx_off(dsp); spin_unlock_irqrestore(&dsp_lock, flags); break; default: if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: ctrl ind %x unhandled " "%s\n", __func__, hh->id, dsp->name); ret = -EINVAL; } break; case (PH_ACTIVATE_IND): case (PH_ACTIVATE_CNF): if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: b_channel is now active %s\n", __func__, dsp->name); /* bchannel now active */ spin_lock_irqsave(&dsp_lock, flags); dsp->b_active = 1; dsp->data_pending = 0; dsp->rx_init = 1; /* rx_W and rx_R will be adjusted on first frame */ dsp->rx_W = 0; dsp->rx_R = 0; memset(dsp->rx_buff, 0, sizeof(dsp->rx_buff)); dsp_cmx_hardware(dsp->conf, dsp); dsp_dtmf_hardware(dsp); dsp_rx_off(dsp); spin_unlock_irqrestore(&dsp_lock, flags); if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: done with activation, sending " "confirm to user space. 
%s\n", __func__, dsp->name); /* send activation to upper layer */ hh->prim = DL_ESTABLISH_CNF; if (dsp->up) return dsp->up->send(dsp->up, skb); break; case (PH_DEACTIVATE_IND): case (PH_DEACTIVATE_CNF): if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: b_channel is now inactive %s\n", __func__, dsp->name); /* bchannel now inactive */ spin_lock_irqsave(&dsp_lock, flags); dsp->b_active = 0; dsp->data_pending = 0; dsp_cmx_hardware(dsp->conf, dsp); dsp_rx_off(dsp); spin_unlock_irqrestore(&dsp_lock, flags); hh->prim = DL_RELEASE_CNF; if (dsp->up) return dsp->up->send(dsp->up, skb); break; /* FROM UP */ case (DL_DATA_REQ): case (PH_DATA_REQ): if (skb->len < 1) { ret = -EINVAL; break; } if (dsp->hdlc) { /* hdlc */ if (!dsp->b_active) { ret = -EIO; break; } hh->prim = PH_DATA_REQ; spin_lock_irqsave(&dsp_lock, flags); skb_queue_tail(&dsp->sendq, skb); schedule_work(&dsp->workq); spin_unlock_irqrestore(&dsp_lock, flags); return 0; } /* send data to tx-buffer (if no tone is played) */ if (!dsp->tone.tone) { spin_lock_irqsave(&dsp_lock, flags); dsp_cmx_transmit(dsp, skb); spin_unlock_irqrestore(&dsp_lock, flags); } break; case (PH_CONTROL_REQ): spin_lock_irqsave(&dsp_lock, flags); ret = dsp_control_req(dsp, hh, skb); spin_unlock_irqrestore(&dsp_lock, flags); break; case (DL_ESTABLISH_REQ): case (PH_ACTIVATE_REQ): if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: activating b_channel %s\n", __func__, dsp->name); if (dsp->dtmf.hardware || dsp->dtmf.software) dsp_dtmf_goertzel_init(dsp); get_features(ch); /* enable fill_empty feature */ if (dsp->features_fill_empty) dsp_fill_empty(dsp); /* send ph_activate */ hh->prim = PH_ACTIVATE_REQ; if (ch->peer) return ch->recv(ch->peer, skb); break; case (DL_RELEASE_REQ): case (PH_DEACTIVATE_REQ): if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: releasing b_channel %s\n", __func__, dsp->name); spin_lock_irqsave(&dsp_lock, flags); dsp->tone.tone = 0; dsp->tone.hardware = 0; dsp->tone.software = 0; if 
(timer_pending(&dsp->tone.tl)) del_timer(&dsp->tone.tl); if (dsp->conf) dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be called here */ skb_queue_purge(&dsp->sendq); spin_unlock_irqrestore(&dsp_lock, flags); hh->prim = PH_DEACTIVATE_REQ; if (ch->peer) return ch->recv(ch->peer, skb); break; default: if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: msg %x unhandled %s\n", __func__, hh->prim, dsp->name); ret = -EINVAL; } if (!ret) dev_kfree_skb(skb); return ret; } static int dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg) { struct dsp *dsp = container_of(ch, struct dsp, ch); u_long flags; int err = 0; if (debug & DEBUG_DSP_CTRL) printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); switch (cmd) { case OPEN_CHANNEL: break; case CLOSE_CHANNEL: if (dsp->ch.peer) dsp->ch.peer->ctrl(dsp->ch.peer, CLOSE_CHANNEL, NULL); /* wait until workqueue has finished, * must lock here, or we may hit send-process currently * queueing. */ spin_lock_irqsave(&dsp_lock, flags); dsp->b_active = 0; spin_unlock_irqrestore(&dsp_lock, flags); /* MUST not be locked, because it waits until queue is done. 
*/ cancel_work_sync(&dsp->workq); spin_lock_irqsave(&dsp_lock, flags); if (timer_pending(&dsp->tone.tl)) del_timer(&dsp->tone.tl); skb_queue_purge(&dsp->sendq); if (dsp_debug & DEBUG_DSP_CTRL) printk(KERN_DEBUG "%s: releasing member %s\n", __func__, dsp->name); dsp->b_active = 0; dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be called here */ dsp_pipeline_destroy(&dsp->pipeline); if (dsp_debug & DEBUG_DSP_CTRL) printk(KERN_DEBUG "%s: remove & destroy object %s\n", __func__, dsp->name); list_del(&dsp->list); spin_unlock_irqrestore(&dsp_lock, flags); if (dsp_debug & DEBUG_DSP_CTRL) printk(KERN_DEBUG "%s: dsp instance released\n", __func__); vfree(dsp); module_put(THIS_MODULE); break; } return err; } static void dsp_send_bh(struct work_struct *work) { struct dsp *dsp = container_of(work, struct dsp, workq); struct sk_buff *skb; struct mISDNhead *hh; if (dsp->hdlc && dsp->data_pending) return; /* wait until data has been acknowledged */ /* send queued data */ while ((skb = skb_dequeue(&dsp->sendq))) { /* in locked date, we must have still data in queue */ if (dsp->data_pending) { if (dsp_debug & DEBUG_DSP_CORE) printk(KERN_DEBUG "%s: fifo full %s, this is " "no bug!\n", __func__, dsp->name); /* flush transparent data, if not acked */ dev_kfree_skb(skb); continue; } hh = mISDN_HEAD_P(skb); if (hh->prim == DL_DATA_REQ) { /* send packet up */ if (dsp->up) { if (dsp->up->send(dsp->up, skb)) dev_kfree_skb(skb); } else dev_kfree_skb(skb); } else { /* send packet down */ if (dsp->ch.peer) { dsp->data_pending = 1; if (dsp->ch.recv(dsp->ch.peer, skb)) { dev_kfree_skb(skb); dsp->data_pending = 0; } } else dev_kfree_skb(skb); } } } static int dspcreate(struct channel_req *crq) { struct dsp *ndsp; u_long flags; if (crq->protocol != ISDN_P_B_L2DSP && crq->protocol != ISDN_P_B_L2DSPHDLC) return -EPROTONOSUPPORT; ndsp = vzalloc(sizeof(struct dsp)); if (!ndsp) { printk(KERN_ERR "%s: vmalloc struct dsp failed\n", __func__); return -ENOMEM; } if (dsp_debug & DEBUG_DSP_CTRL) 
printk(KERN_DEBUG "%s: creating new dsp instance\n", __func__); /* default enabled */ INIT_WORK(&ndsp->workq, (void *)dsp_send_bh); skb_queue_head_init(&ndsp->sendq); ndsp->ch.send = dsp_function; ndsp->ch.ctrl = dsp_ctrl; ndsp->up = crq->ch; crq->ch = &ndsp->ch; if (crq->protocol == ISDN_P_B_L2DSP) { crq->protocol = ISDN_P_B_RAW; ndsp->hdlc = 0; } else { crq->protocol = ISDN_P_B_HDLC; ndsp->hdlc = 1; } if (!try_module_get(THIS_MODULE)) printk(KERN_WARNING "%s:cannot get module\n", __func__); sprintf(ndsp->name, "DSP_C%x(0x%p)", ndsp->up->st->dev->id + 1, ndsp); /* set frame size to start */ ndsp->features.hfc_id = -1; /* current PCM id */ ndsp->features.pcm_id = -1; /* current PCM id */ ndsp->pcm_slot_rx = -1; /* current CPM slot */ ndsp->pcm_slot_tx = -1; ndsp->pcm_bank_rx = -1; ndsp->pcm_bank_tx = -1; ndsp->hfc_conf = -1; /* current conference number */ /* set tone timer */ ndsp->tone.tl.function = (void *)dsp_tone_timeout; ndsp->tone.tl.data = (long) ndsp; init_timer(&ndsp->tone.tl); if (dtmfthreshold < 20 || dtmfthreshold > 500) dtmfthreshold = 200; ndsp->dtmf.treshold = dtmfthreshold * 10000; /* init pipeline append to list */ spin_lock_irqsave(&dsp_lock, flags); dsp_pipeline_init(&ndsp->pipeline); list_add_tail(&ndsp->list, &dsp_ilist); spin_unlock_irqrestore(&dsp_lock, flags); return 0; } static struct Bprotocol DSP = { .Bprotocols = (1 << (ISDN_P_B_L2DSP & ISDN_P_B_MASK)) | (1 << (ISDN_P_B_L2DSPHDLC & ISDN_P_B_MASK)), .name = "dsp", .create = dspcreate }; static int __init dsp_init(void) { int err; int tics; printk(KERN_INFO "DSP module %s\n", mISDN_dsp_revision); dsp_options = options; dsp_debug = debug; /* set packet size */ dsp_poll = poll; if (dsp_poll) { if (dsp_poll > MAX_POLL) { printk(KERN_ERR "%s: Wrong poll value (%d), use %d " "maximum.\n", __func__, poll, MAX_POLL); err = -EINVAL; return err; } if (dsp_poll < 8) { printk(KERN_ERR "%s: Wrong poll value (%d), use 8 " "minimum.\n", __func__, dsp_poll); err = -EINVAL; return err; } dsp_tics = poll 
* HZ / 8000; if (dsp_tics * 8000 != poll * HZ) { printk(KERN_INFO "mISDN_dsp: Cannot clock every %d " "samples (0,125 ms). It is not a multiple of " "%d HZ.\n", poll, HZ); err = -EINVAL; return err; } } else { poll = 8; while (poll <= MAX_POLL) { tics = (poll * HZ) / 8000; if (tics * 8000 == poll * HZ) { dsp_tics = tics; dsp_poll = poll; if (poll >= 64) break; } poll++; } } if (dsp_poll == 0) { printk(KERN_INFO "mISDN_dsp: There is no multiple of kernel " "clock that equals exactly the duration of 8-256 " "samples. (Choose kernel clock speed like 100, 250, " "300, 1000)\n"); err = -EINVAL; return err; } printk(KERN_INFO "mISDN_dsp: DSP clocks every %d samples. This equals " "%d jiffies.\n", dsp_poll, dsp_tics); spin_lock_init(&dsp_lock); INIT_LIST_HEAD(&dsp_ilist); INIT_LIST_HEAD(&conf_ilist); /* init conversion tables */ dsp_audio_generate_law_tables(); dsp_silence = (dsp_options & DSP_OPT_ULAW) ? 0xff : 0x2a; dsp_audio_law_to_s32 = (dsp_options & DSP_OPT_ULAW) ? dsp_audio_ulaw_to_s32 : dsp_audio_alaw_to_s32; dsp_audio_generate_s2law_table(); dsp_audio_generate_seven(); dsp_audio_generate_mix_table(); if (dsp_options & DSP_OPT_ULAW) dsp_audio_generate_ulaw_samples(); dsp_audio_generate_volume_changes(); err = dsp_pipeline_module_init(); if (err) { printk(KERN_ERR "mISDN_dsp: Can't initialize pipeline, " "error(%d)\n", err); return err; } err = mISDN_register_Bprotocol(&DSP); if (err) { printk(KERN_ERR "Can't register %s error(%d)\n", DSP.name, err); return err; } /* set sample timer */ dsp_spl_tl.function = (void *)dsp_cmx_send; dsp_spl_tl.data = 0; init_timer(&dsp_spl_tl); dsp_spl_tl.expires = jiffies + dsp_tics; dsp_spl_jiffies = dsp_spl_tl.expires; add_timer(&dsp_spl_tl); return 0; } static void __exit dsp_cleanup(void) { mISDN_unregister_Bprotocol(&DSP); del_timer_sync(&dsp_spl_tl); if (!list_empty(&dsp_ilist)) { printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not " "empty.\n"); } if (!list_empty(&conf_ilist)) { printk(KERN_ERR "mISDN_dsp: Conference 
list not empty. Not " "all memory freed.\n"); } dsp_pipeline_module_exit(); } module_init(dsp_init); module_exit(dsp_cleanup);
gpl-2.0
SanziShi/KVMGT-kernel
arch/frv/mm/init.c
2272
4584
/* init.c: memory initialisation for FRV * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Derived from: * - linux/arch/m68knommu/mm/init.c * - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, Kenneth Albanowski <kjahds@kjahds.com>, * - Copyright (C) 2000 Lineo, Inc. (www.lineo.com) * - linux/arch/m68k/mm/init.c * - Copyright (C) 1995 Hamish Macdonald */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/pagemap.h> #include <linux/gfp.h> #include <linux/swap.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/module.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/virtconvert.h> #include <asm/sections.h> #include <asm/tlb.h> #undef DEBUG /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a * do_exit(), but using this instead means there is less risk * for a process dying in kernel mode, possibly leaving a inode * unused etc.. * * BAD_PAGETABLE is the accompanying page-table: it is initialized * to point to BAD_PAGE entries. * * ZERO_PAGE is a special page that is used for zero-initialized * data and COW. */ static unsigned long empty_bad_page_table; static unsigned long empty_bad_page; unsigned long empty_zero_page; EXPORT_SYMBOL(empty_zero_page); /*****************************************************************************/ /* * paging_init() continues the virtual memory environment setup which * was begun by the code in arch/head.S. 
* The parameters are pointers to where to stick the starting and ending * addresses of available kernel virtual memory. */ void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES] = {0, }; /* allocate some pages for kernel housekeeping tasks */ empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); memset((void *) empty_zero_page, 0, PAGE_SIZE); #ifdef CONFIG_HIGHMEM if (get_num_physpages() - num_mappedpages) { pgd_t *pge; pud_t *pue; pmd_t *pme; pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE); pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE); pue = pud_offset(pge, PKMAP_BASE); pme = pmd_offset(pue, PKMAP_BASE); __set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE); } #endif /* distribute the allocatable pages across the various zones and pass them to the allocator */ zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; #ifdef CONFIG_HIGHMEM zones_size[ZONE_HIGHMEM] = get_num_physpages() - num_mappedpages; #endif free_area_init(zones_size); #ifdef CONFIG_MMU /* initialise init's MMU context */ init_new_context(&init_task, &init_mm); #endif } /* end paging_init() */ /*****************************************************************************/ /* * */ void __init mem_init(void) { unsigned long code_size = _etext - _stext; /* this will put all low memory onto the freelists */ free_all_bootmem(); #if defined(CONFIG_MMU) && defined(CONFIG_HIGHMEM) { unsigned long pfn; for (pfn = get_num_physpages() - 1; pfn >= num_mappedpages; pfn--) free_highmem_page(&mem_map[pfn]); } #endif mem_init_print_info(NULL); if (rom_length > 0 && rom_length >= code_size) printk("Memory available: %luKiB/%luKiB ROM\n", (rom_length - code_size) >> 10, rom_length >> 10); } /* end mem_init() */ /*****************************************************************************/ /* * free the memory that was only required for 
initialisation */ void free_initmem(void) { #if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL) free_initmem_default(-1); #endif } /* end free_initmem() */ /*****************************************************************************/ /* * free the initial ramdisk memory */ #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { free_reserved_area((void *)start, (void *)end, -1, "initrd"); } /* end free_initrd_mem() */ #endif
gpl-2.0
ibrahima/kernel_i9300
arch/arm/mach-mmp/mmp2.c
2528
6447
/* * linux/arch/arm/mach-mmp/mmp2.c * * code name MMP2 * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/hardware/cache-tauros2.h> #include <asm/mach/time.h> #include <mach/addr-map.h> #include <mach/regs-apbc.h> #include <mach/regs-apmu.h> #include <mach/cputype.h> #include <mach/irqs.h> #include <mach/dma.h> #include <mach/mfp.h> #include <mach/gpio.h> #include <mach/devices.h> #include <mach/mmp2.h> #include "common.h" #include "clock.h" #define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000) #define APMASK(i) (GPIO_REGS_VIRT + BANK_OFF(i) + 0x9c) static struct mfp_addr_map mmp2_addr_map[] __initdata = { MFP_ADDR_X(GPIO0, GPIO58, 0x54), MFP_ADDR_X(GPIO59, GPIO73, 0x280), MFP_ADDR_X(GPIO74, GPIO101, 0x170), MFP_ADDR(GPIO102, 0x0), MFP_ADDR(GPIO103, 0x4), MFP_ADDR(GPIO104, 0x1fc), MFP_ADDR(GPIO105, 0x1f8), MFP_ADDR(GPIO106, 0x1f4), MFP_ADDR(GPIO107, 0x1f0), MFP_ADDR(GPIO108, 0x21c), MFP_ADDR(GPIO109, 0x218), MFP_ADDR(GPIO110, 0x214), MFP_ADDR(GPIO111, 0x200), MFP_ADDR(GPIO112, 0x244), MFP_ADDR(GPIO113, 0x25c), MFP_ADDR(GPIO114, 0x164), MFP_ADDR_X(GPIO115, GPIO122, 0x260), MFP_ADDR(GPIO123, 0x148), MFP_ADDR_X(GPIO124, GPIO141, 0xc), MFP_ADDR(GPIO142, 0x8), MFP_ADDR_X(GPIO143, GPIO151, 0x220), MFP_ADDR_X(GPIO152, GPIO153, 0x248), MFP_ADDR_X(GPIO154, GPIO155, 0x254), MFP_ADDR_X(GPIO156, GPIO159, 0x14c), MFP_ADDR(GPIO160, 0x250), MFP_ADDR(GPIO161, 0x210), MFP_ADDR(GPIO162, 0x20c), MFP_ADDR(GPIO163, 0x208), MFP_ADDR(GPIO164, 0x204), MFP_ADDR(GPIO165, 0x1ec), MFP_ADDR(GPIO166, 0x1e8), MFP_ADDR(GPIO167, 0x1e4), MFP_ADDR(GPIO168, 0x1e0), MFP_ADDR_X(TWSI1_SCL, TWSI1_SDA, 0x140), MFP_ADDR_X(TWSI4_SCL, TWSI4_SDA, 0x2bc), MFP_ADDR(PMIC_INT, 0x2c4), MFP_ADDR(CLK_REQ, 0x160), MFP_ADDR_END, 
}; void mmp2_clear_pmic_int(void) { unsigned long mfpr_pmic, data; mfpr_pmic = APB_VIRT_BASE + 0x1e000 + 0x2c4; data = __raw_readl(mfpr_pmic); __raw_writel(data | (1 << 6), mfpr_pmic); __raw_writel(data, mfpr_pmic); } static void __init mmp2_init_gpio(void) { int i; /* enable GPIO clock */ __raw_writel(APBC_APBCLK | APBC_FNCLK, APBC_MMP2_GPIO); /* unmask GPIO edge detection for all 6 banks -- APMASKx */ for (i = 0; i < 6; i++) __raw_writel(0xffffffff, APMASK(i)); pxa_init_gpio(IRQ_MMP2_GPIO, 0, 167, NULL); } void __init mmp2_init_irq(void) { mmp2_init_icu(); mmp2_init_gpio(); } static void sdhc_clk_enable(struct clk *clk) { uint32_t clk_rst; clk_rst = __raw_readl(clk->clk_rst); clk_rst |= clk->enable_val; __raw_writel(clk_rst, clk->clk_rst); } static void sdhc_clk_disable(struct clk *clk) { uint32_t clk_rst; clk_rst = __raw_readl(clk->clk_rst); clk_rst &= ~clk->enable_val; __raw_writel(clk_rst, clk->clk_rst); } struct clkops sdhc_clk_ops = { .enable = sdhc_clk_enable, .disable = sdhc_clk_disable, }; /* APB peripheral clocks */ static APBC_CLK(uart1, MMP2_UART1, 1, 26000000); static APBC_CLK(uart2, MMP2_UART2, 1, 26000000); static APBC_CLK(uart3, MMP2_UART3, 1, 26000000); static APBC_CLK(uart4, MMP2_UART4, 1, 26000000); static APBC_CLK(twsi1, MMP2_TWSI1, 0, 26000000); static APBC_CLK(twsi2, MMP2_TWSI2, 0, 26000000); static APBC_CLK(twsi3, MMP2_TWSI3, 0, 26000000); static APBC_CLK(twsi4, MMP2_TWSI4, 0, 26000000); static APBC_CLK(twsi5, MMP2_TWSI5, 0, 26000000); static APBC_CLK(twsi6, MMP2_TWSI6, 0, 26000000); static APMU_CLK(nand, NAND, 0xbf, 100000000); static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops); static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops); static struct clk_lookup mmp2_clkregs[] = { INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL), 
INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL), INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL), INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL), INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL), INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL), INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL), INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL), INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), INIT_CLKREG(&clk_sdh0, "sdhci-pxa.0", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh1, "sdhci-pxa.1", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh2, "sdhci-pxa.2", "PXA-SDHCLK"), INIT_CLKREG(&clk_sdh3, "sdhci-pxa.3", "PXA-SDHCLK"), }; static int __init mmp2_init(void) { if (cpu_is_mmp2()) { #ifdef CONFIG_CACHE_TAUROS2 tauros2_init(); #endif mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(mmp2_addr_map); pxa_init_dma(IRQ_MMP2_DMA_RIQ, 16); clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs)); } return 0; } postcore_initcall(mmp2_init); static void __init mmp2_timer_init(void) { unsigned long clk_rst; __raw_writel(APBC_APBCLK | APBC_RST, APBC_MMP2_TIMERS); /* * enable bus/functional clock, enable 6.5MHz (divider 4), * release reset */ clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(1); __raw_writel(clk_rst, APBC_MMP2_TIMERS); timer_init(IRQ_MMP2_TIMER1); } struct sys_timer mmp2_timer = { .init = mmp2_timer_init, }; /* on-chip devices */ MMP2_DEVICE(uart1, "pxa2xx-uart", 0, UART1, 0xd4030000, 0x30, 4, 5); MMP2_DEVICE(uart2, "pxa2xx-uart", 1, UART2, 0xd4017000, 0x30, 20, 21); MMP2_DEVICE(uart3, "pxa2xx-uart", 2, UART3, 0xd4018000, 0x30, 22, 23); MMP2_DEVICE(uart4, "pxa2xx-uart", 3, UART4, 0xd4016000, 0x30, 18, 19); MMP2_DEVICE(twsi1, "pxa2xx-i2c", 0, TWSI1, 0xd4011000, 0x70); MMP2_DEVICE(twsi2, "pxa2xx-i2c", 1, TWSI2, 0xd4031000, 0x70); MMP2_DEVICE(twsi3, "pxa2xx-i2c", 2, TWSI3, 0xd4032000, 0x70); MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); 
MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29); MMP2_DEVICE(sdh0, "sdhci-pxa", 0, MMC, 0xd4280000, 0x120); MMP2_DEVICE(sdh1, "sdhci-pxa", 1, MMC2, 0xd4280800, 0x120); MMP2_DEVICE(sdh2, "sdhci-pxa", 2, MMC3, 0xd4281000, 0x120); MMP2_DEVICE(sdh3, "sdhci-pxa", 3, MMC4, 0xd4281800, 0x120);
gpl-2.0
linyvxiang/linux-zswap
drivers/scsi/nsp32.c
2528
90538
/* * NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver * Copyright (C) 2001, 2002, 2003 * YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> * GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Revision History: * 1.0: Initial Release. * 1.1: Add /proc SDTR status. * Remove obsolete error handler nsp32_reset. * Some clean up. * 1.2: PowerPC (big endian) support. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/dma-mapping.h> #include <asm/dma.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include "nsp32.h" /*********************************************************************** * Module parameters */ static int trans_mode = 0; /* default: BIOS */ module_param (trans_mode, int, 0); MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M"); #define ASYNC_MODE 1 #define ULTRA20M_MODE 2 static bool auto_param = 0; /* default: ON */ module_param (auto_param, bool, 0); MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)"); static bool disc_priv = 1; /* default: OFF */ module_param (disc_priv, bool, 0); MODULE_PARM_DESC(disc_priv, "disconnection 
privilege mode (0: ON 1: OFF(default))"); MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>, GOTO Masanori <gotom@debian.or.jp>"); MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module"); MODULE_LICENSE("GPL"); static const char *nsp32_release_version = "1.2"; /**************************************************************************** * Supported hardware */ static struct pci_device_id nsp32_pci_table[] = { { .vendor = PCI_VENDOR_ID_IODATA, .device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_IODATA, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_KME, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_KME, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_WBT, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_WORKBIT, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_WORKBIT_STANDARD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_WORKBIT, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_LOGITEC, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_LOGITEC, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_MELCO, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_MELCO, }, {0,0,}, }; MODULE_DEVICE_TABLE(pci, nsp32_pci_table); static nsp32_hw_data nsp32_data_base; /* probe <-> detect glue */ /* * Period/AckWidth speed conversion table * * Note: This period/ackwidth speed table must be in 
descending order. */ static nsp32_sync_table nsp32_sync_table_40M[] = { /* {PNo, AW, SP, EP, SREQ smpl} Speed(MB/s) Period AckWidth */ {0x1, 0, 0x0c, 0x0c, SMPL_40M}, /* 20.0 : 50ns, 25ns */ {0x2, 0, 0x0d, 0x18, SMPL_40M}, /* 13.3 : 75ns, 25ns */ {0x3, 1, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ {0x4, 1, 0x1a, 0x1f, SMPL_20M}, /* 8.0 : 125ns, 50ns */ {0x5, 2, 0x20, 0x25, SMPL_20M}, /* 6.7 : 150ns, 75ns */ {0x6, 2, 0x26, 0x31, SMPL_20M}, /* 5.7 : 175ns, 75ns */ {0x7, 3, 0x32, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ {0x8, 3, 0x33, 0x38, SMPL_10M}, /* 4.4 : 225ns, 100ns */ {0x9, 3, 0x39, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ }; static nsp32_sync_table nsp32_sync_table_20M[] = { {0x1, 0, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ {0x2, 0, 0x1a, 0x25, SMPL_20M}, /* 6.7 : 150ns, 50ns */ {0x3, 1, 0x26, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ {0x4, 1, 0x33, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ {0x5, 2, 0x3f, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 150ns */ {0x6, 2, 0x4c, 0x57, SMPL_10M}, /* 2.8 : 350ns, 150ns */ {0x7, 3, 0x58, 0x64, SMPL_10M}, /* 2.5 : 400ns, 200ns */ {0x8, 3, 0x65, 0x70, SMPL_10M}, /* 2.2 : 450ns, 200ns */ {0x9, 3, 0x71, 0x7d, SMPL_10M}, /* 2.0 : 500ns, 200ns */ }; static nsp32_sync_table nsp32_sync_table_pci[] = { {0x1, 0, 0x0c, 0x0f, SMPL_40M}, /* 16.6 : 60ns, 30ns */ {0x2, 0, 0x10, 0x16, SMPL_40M}, /* 11.1 : 90ns, 30ns */ {0x3, 1, 0x17, 0x1e, SMPL_20M}, /* 8.3 : 120ns, 60ns */ {0x4, 1, 0x1f, 0x25, SMPL_20M}, /* 6.7 : 150ns, 60ns */ {0x5, 2, 0x26, 0x2d, SMPL_20M}, /* 5.6 : 180ns, 90ns */ {0x6, 2, 0x2e, 0x34, SMPL_10M}, /* 4.8 : 210ns, 90ns */ {0x7, 3, 0x35, 0x3c, SMPL_10M}, /* 4.2 : 240ns, 120ns */ {0x8, 3, 0x3d, 0x43, SMPL_10M}, /* 3.7 : 270ns, 120ns */ {0x9, 3, 0x44, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 120ns */ }; /* * function declaration */ /* module entry point */ static int nsp32_probe (struct pci_dev *, const struct pci_device_id *); static void nsp32_remove(struct pci_dev *); static int __init init_nsp32 (void); static void __exit 
exit_nsp32 (void); /* struct struct scsi_host_template */ static int nsp32_show_info (struct seq_file *, struct Scsi_Host *); static int nsp32_detect (struct pci_dev *pdev); static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static const char *nsp32_info (struct Scsi_Host *); static int nsp32_release (struct Scsi_Host *); /* SCSI error handler */ static int nsp32_eh_abort (struct scsi_cmnd *); static int nsp32_eh_bus_reset (struct scsi_cmnd *); static int nsp32_eh_host_reset(struct scsi_cmnd *); /* generate SCSI message */ static void nsp32_build_identify(struct scsi_cmnd *); static void nsp32_build_nop (struct scsi_cmnd *); static void nsp32_build_reject (struct scsi_cmnd *); static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char); /* SCSI message handler */ static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short); static void nsp32_msgout_occur (struct scsi_cmnd *); static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short); static int nsp32_setup_sg_table (struct scsi_cmnd *); static int nsp32_selection_autopara(struct scsi_cmnd *); static int nsp32_selection_autoscsi(struct scsi_cmnd *); static void nsp32_scsi_done (struct scsi_cmnd *); static int nsp32_arbitration (struct scsi_cmnd *, unsigned int); static int nsp32_reselection (struct scsi_cmnd *, unsigned char); static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int); static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short); /* SCSI SDTR */ static void nsp32_analyze_sdtr (struct scsi_cmnd *); static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char); static void nsp32_set_async (nsp32_hw_data *, nsp32_target *); static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *); static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char); /* SCSI bus status handler */ static void nsp32_wait_req (nsp32_hw_data *, int); 
static void nsp32_wait_sack (nsp32_hw_data *, int); static void nsp32_sack_assert (nsp32_hw_data *); static void nsp32_sack_negate (nsp32_hw_data *); static void nsp32_do_bus_reset(nsp32_hw_data *); /* hardware interrupt handler */ static irqreturn_t do_nsp32_isr(int, void *); /* initialize hardware */ static int nsp32hw_init(nsp32_hw_data *); /* EEPROM handler */ static int nsp32_getprom_param (nsp32_hw_data *); static int nsp32_getprom_at24 (nsp32_hw_data *); static int nsp32_getprom_c16 (nsp32_hw_data *); static void nsp32_prom_start (nsp32_hw_data *); static void nsp32_prom_stop (nsp32_hw_data *); static int nsp32_prom_read (nsp32_hw_data *, int); static int nsp32_prom_read_bit (nsp32_hw_data *); static void nsp32_prom_write_bit(nsp32_hw_data *, int); static void nsp32_prom_set (nsp32_hw_data *, int, int); static int nsp32_prom_get (nsp32_hw_data *, int); /* debug/warning/info message */ static void nsp32_message (const char *, int, char *, char *, ...); #ifdef NSP32_DEBUG static void nsp32_dmessage(const char *, int, int, char *, ...); #endif /* * max_sectors is currently limited up to 128. */ static struct scsi_host_template nsp32_template = { .proc_name = "nsp32", .name = "Workbit NinjaSCSI-32Bi/UDE", .show_info = nsp32_show_info, .info = nsp32_info, .queuecommand = nsp32_queuecommand, .can_queue = 1, .sg_tablesize = NSP32_SG_SIZE, .max_sectors = 128, .cmd_per_lun = 1, .this_id = NSP32_HOST_SCSIID, .use_clustering = DISABLE_CLUSTERING, .eh_abort_handler = nsp32_eh_abort, .eh_bus_reset_handler = nsp32_eh_bus_reset, .eh_host_reset_handler = nsp32_eh_host_reset, /* .highmem_io = 1, */ }; #include "nsp32_io.h" /*********************************************************************** * debug, error print */ #ifndef NSP32_DEBUG # define NSP32_DEBUG_MASK 0x000000 # define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args) # define nsp32_dbg(mask, args...) /* */ #else # define NSP32_DEBUG_MASK 0xffffff # define nsp32_msg(type, args...) 
\ nsp32_message (__func__, __LINE__, (type), args) # define nsp32_dbg(mask, args...) \ nsp32_dmessage(__func__, __LINE__, (mask), args) #endif #define NSP32_DEBUG_QUEUECOMMAND BIT(0) #define NSP32_DEBUG_REGISTER BIT(1) #define NSP32_DEBUG_AUTOSCSI BIT(2) #define NSP32_DEBUG_INTR BIT(3) #define NSP32_DEBUG_SGLIST BIT(4) #define NSP32_DEBUG_BUSFREE BIT(5) #define NSP32_DEBUG_CDB_CONTENTS BIT(6) #define NSP32_DEBUG_RESELECTION BIT(7) #define NSP32_DEBUG_MSGINOCCUR BIT(8) #define NSP32_DEBUG_EEPROM BIT(9) #define NSP32_DEBUG_MSGOUTOCCUR BIT(10) #define NSP32_DEBUG_BUSRESET BIT(11) #define NSP32_DEBUG_RESTART BIT(12) #define NSP32_DEBUG_SYNC BIT(13) #define NSP32_DEBUG_WAIT BIT(14) #define NSP32_DEBUG_TARGETFLAG BIT(15) #define NSP32_DEBUG_PROC BIT(16) #define NSP32_DEBUG_INIT BIT(17) #define NSP32_SPECIAL_PRINT_REGISTER BIT(20) #define NSP32_DEBUG_BUF_LEN 100 static void nsp32_message(const char *func, int line, char *type, char *fmt, ...) { va_list args; char buf[NSP32_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); #ifndef NSP32_DEBUG printk("%snsp32: %s\n", type, buf); #else printk("%snsp32: %s (%d): %s\n", type, func, line, buf); #endif } #ifdef NSP32_DEBUG static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...) { va_list args; char buf[NSP32_DEBUG_BUF_LEN]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (mask & NSP32_DEBUG_MASK) { printk("nsp32-debug: 0x%x %s (%d): %s\n", mask, func, line, buf); } } #endif #ifdef NSP32_DEBUG # include "nsp32_debug.c" #else # define show_command(arg) /* */ # define show_busphase(arg) /* */ # define show_autophase(arg) /* */ #endif /* * IDENTIFY Message */ static void nsp32_build_identify(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; int mode = FALSE; /* XXX: Auto DiscPriv detection is progressing... 
*/ if (disc_priv == 0) { /* mode = TRUE; */ } data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++; data->msgout_len = pos; } /* * SDTR Message Routine */ static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt, unsigned char period, unsigned char offset) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; data->msgoutbuf[pos] = EXTENDED_MESSAGE; pos++; data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++; data->msgoutbuf[pos] = EXTENDED_SDTR; pos++; data->msgoutbuf[pos] = period; pos++; data->msgoutbuf[pos] = offset; pos++; data->msgout_len = pos; } /* * No Operation Message */ static void nsp32_build_nop(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; if (pos != 0) { nsp32_msg(KERN_WARNING, "Some messages are already contained!"); return; } data->msgoutbuf[pos] = NOP; pos++; data->msgout_len = pos; } /* * Reject Message */ static void nsp32_build_reject(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int pos = data->msgout_len; data->msgoutbuf[pos] = MESSAGE_REJECT; pos++; data->msgout_len = pos; } /* * timer */ #if 0 static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time) { unsigned int base = SCpnt->host->io_port; nsp32_dbg(NSP32_DEBUG_INTR, "timer=%d", time); if (time & (~TIMER_CNT_MASK)) { nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow"); } nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK); } #endif /* * set SCSI command and other parameter to asic, and start selection phase */ static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; unsigned int host_id = SCpnt->device->host->this_id; unsigned char target = scmd_id(SCpnt); nsp32_autoparam *param = data->autoparam; unsigned char phase; int i, ret; unsigned int msgout; u16_le s; 
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in"); /* * check bus free */ phase = nsp32_read1(base, SCSI_BUS_MONITOR); if (phase != BUSMON_BUS_FREE) { nsp32_msg(KERN_WARNING, "bus busy"); show_busphase(phase & BUSMON_PHASE_MASK); SCpnt->result = DID_BUS_BUSY << 16; return FALSE; } /* * message out * * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout. * over 3 messages needs another routine. */ if (data->msgout_len == 0) { nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!"); SCpnt->result = DID_ERROR << 16; return FALSE; } else if (data->msgout_len > 0 && data->msgout_len <= 3) { msgout = 0; for (i = 0; i < data->msgout_len; i++) { /* * the sending order of the message is: * MCNT 3: MSG#0 -> MSG#1 -> MSG#2 * MCNT 2: MSG#1 -> MSG#2 * MCNT 1: MSG#2 */ msgout >>= 8; msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24); } msgout |= MV_VALID; /* MV valid */ msgout |= (unsigned int)data->msgout_len; /* len */ } else { /* data->msgout_len > 3 */ msgout = 0; } // nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT)); // nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); /* * setup asic parameter */ memset(param, 0, sizeof(nsp32_autoparam)); /* cdb */ for (i = 0; i < SCpnt->cmd_len; i++) { param->cdb[4 * i] = SCpnt->cmnd[i]; } /* outgoing messages */ param->msgout = cpu_to_le32(msgout); /* syncreg, ackwidth, target id, SREQ sampling rate */ param->syncreg = data->cur_target->syncreg; param->ackwidth = data->cur_target->ackwidth; param->target_id = BIT(host_id) | BIT(target); param->sample_reg = data->cur_target->sample_reg; // nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg); /* command control */ param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER | AUTOSCSI_START | AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02 | AUTO_ATN ); /* transfer control */ s = 0; switch (data->trans_method) { case NSP32_TRANSFER_BUSMASTER: s |= BM_START; break; case NSP32_TRANSFER_MMIO: s |= CB_MMIO_MODE; break; case 
NSP32_TRANSFER_PIO: s |= CB_IO_MODE; break; default: nsp32_msg(KERN_ERR, "unknown trans_method"); break; } /* * OR-ed BLIEND_MODE, FIFO intr is decreased, instead of PCI bus waits. * For bus master transfer, it's taken off. */ s |= (TRANSFER_GO | ALL_COUNTER_CLR); param->transfer_control = cpu_to_le16(s); /* sg table addr */ param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr); /* * transfer parameter to ASIC */ nsp32_write4(base, SGT_ADR, data->auto_paddr); nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER | AUTO_PARAMETER ); /* * Check arbitration */ ret = nsp32_arbitration(SCpnt, base); return ret; } /* * Selection with AUTO SCSI (without AUTO PARAMETER) */ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; unsigned int host_id = SCpnt->device->host->this_id; unsigned char target = scmd_id(SCpnt); unsigned char phase; int status; unsigned short command = 0; unsigned int msgout = 0; unsigned short execph; int i; nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in"); /* * IRQ disable */ nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); /* * check bus line */ phase = nsp32_read1(base, SCSI_BUS_MONITOR); if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) { nsp32_msg(KERN_WARNING, "bus busy"); SCpnt->result = DID_BUS_BUSY << 16; status = 1; goto out; } /* * clear execph */ execph = nsp32_read2(base, SCSI_EXECUTE_PHASE); /* * clear FIFO counter to set CDBs */ nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER); /* * set CDB0 - CDB15 */ for (i = 0; i < SCpnt->cmd_len; i++) { nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]); } nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]); /* * set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID */ nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target)); /* * set SCSI MSGOUT REG * * Note: If the range of msgout_len is 1 - 3, fill 
scsi_msgout. * over 3 messages needs another routine. */ if (data->msgout_len == 0) { nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!"); SCpnt->result = DID_ERROR << 16; status = 1; goto out; } else if (data->msgout_len > 0 && data->msgout_len <= 3) { msgout = 0; for (i = 0; i < data->msgout_len; i++) { /* * the sending order of the message is: * MCNT 3: MSG#0 -> MSG#1 -> MSG#2 * MCNT 2: MSG#1 -> MSG#2 * MCNT 1: MSG#2 */ msgout >>= 8; msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24); } msgout |= MV_VALID; /* MV valid */ msgout |= (unsigned int)data->msgout_len; /* len */ nsp32_write4(base, SCSI_MSG_OUT, msgout); } else { /* data->msgout_len > 3 */ nsp32_write4(base, SCSI_MSG_OUT, 0); } /* * set selection timeout(= 250ms) */ nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); /* * set SREQ hazard killer sampling rate * * TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz. * check other internal clock! */ nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg); /* * clear Arbit */ nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR); /* * set SYNCREG * Don't set BM_START_ADR before setting this register. 
*/ nsp32_write1(base, SYNC_REG, data->cur_target->syncreg); /* * set ACKWIDTH */ nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth); nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x", nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH), nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID)); nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x", data->msgout_len, msgout); /* * set SGT ADDR (physical address) */ nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr); /* * set TRANSFER CONTROL REG */ command = 0; command |= (TRANSFER_GO | ALL_COUNTER_CLR); if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { if (scsi_bufflen(SCpnt) > 0) { command |= BM_START; } } else if (data->trans_method & NSP32_TRANSFER_MMIO) { command |= CB_MMIO_MODE; } else if (data->trans_method & NSP32_TRANSFER_PIO) { command |= CB_IO_MODE; } nsp32_write2(base, TRANSFER_CONTROL, command); /* * start AUTO SCSI, kick off arbitration */ command = (CLEAR_CDB_FIFO_POINTER | AUTOSCSI_START | AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02 | AUTO_ATN ); nsp32_write2(base, COMMAND_CONTROL, command); /* * Check arbitration */ status = nsp32_arbitration(SCpnt, base); out: /* * IRQ enable */ nsp32_write2(base, IRQ_CONTROL, 0); return status; } /* * Arbitration Status Check * * Note: Arbitration counter is waited during ARBIT_GO is not lifting. * Using udelay(1) consumes CPU time and system time, but * arbitration delay time is defined minimal 2.4us in SCSI * specification, thus udelay works as coarse grained wait timer. 
 */
/*
 * nsp32_arbitration - poll ARBIT_STATUS until the chip reports win/fail
 * or the coarse udelay-less loop exceeds ARBIT_TIMEOUT_TIME iterations.
 * Sets SCpnt->result and returns TRUE on win, FALSE on fail/timeout.
 * Always clears the arbitration latch before returning.
 */
static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
{
	unsigned char arbit;
	int	      status = TRUE;
	int	      time   = 0;

	do {
		arbit = nsp32_read1(base, ARBIT_STATUS);
		time++;
	} while ((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 &&
		 (time <= ARBIT_TIMEOUT_TIME));

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
		  "arbit: 0x%x, delay time: %d", arbit, time);

	if (arbit & ARBIT_WIN) {
		/* Arbitration succeeded */
		SCpnt->result = DID_OK << 16;
		nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */
	} else if (arbit & ARBIT_FAIL) {
		/* Arbitration failed */
		SCpnt->result = DID_BUS_BUSY << 16;
		status = FALSE;
	} else {
		/*
		 * unknown error or ARBIT_GO timeout,
		 * something lock up! guess no connection.
		 */
		nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
		SCpnt->result = DID_NO_CONNECT << 16;
		status = FALSE;
	}

	/*
	 * clear Arbit
	 */
	nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);

	return status;
}


/*
 * reselection
 *
 * Note: This reselection routine is called from msgin_occur,
 *	 reselection target id&lun must be already set.
 *	 SCSI-2 says IDENTIFY implies RESTORE_POINTER operation.
 *
 * Decodes the reselecting target's ID from the RESELECT_ID bitmap
 * (lowest set bit excluding our own host_id), validates that a command
 * nexus exists for that id/lun, and switches data->cur_* to it.
 * Returns TRUE on success, FALSE on an unexpected reselection.
 */
static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   host_id = SCpnt->device->host->this_id;
	unsigned int   base    = SCpnt->device->host->io_port;
	unsigned char  tmpid, newid;

	nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter");

	/*
	 * calculate reselected SCSI ID
	 */
	tmpid = nsp32_read1(base, RESELECT_ID);
	tmpid &= (~BIT(host_id));
	newid = 0;
	while (tmpid) {
		if (tmpid & 1) {
			break;
		}
		tmpid >>= 1;
		newid++;
	}

	/*
	 * If reselected New ID:LUN is not existed
	 * or current nexus is not existed, unexpected
	 * reselection is occurred. Send reject message.
	 */
	if (newid >= ARRAY_SIZE(data->lunt) ||
	    newlun >= ARRAY_SIZE(data->lunt[0])) {
		nsp32_msg(KERN_WARNING, "unknown id/lun");
		return FALSE;
	} else if(data->lunt[newid][newlun].SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "no SCSI command is processing");
		return FALSE;
	}

	data->cur_id	 = newid;
	data->cur_lun	 = newlun;
	data->cur_target = &(data->target[newid]);
	data->cur_lunt	 = &(data->lunt[newid][newlun]);

	/* reset SACK/SavedACK counter (or ALL clear?) */
	nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

	return TRUE;
}


/*
 * nsp32_setup_sg_table - build scatter gather list for transfer data
 *			  with bus master.
 *
 * Note: NinjaSCSI-32Bi/UDE bus master can not transfer over 64KB at a time.
 *
 * Maps the command for DMA and copies each segment's address/length into
 * the chip-visible nsp32_sgtable (little-endian), marking the last entry
 * with SGTEND.  Returns TRUE on success (including zero-segment commands),
 * FALSE on mapping failure or an over-64KB segment.
 */
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	struct scatterlist *sg;
	nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
	int num, i;
	u32_le l;

	if (sgt == NULL) {
		nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
		return FALSE;
	}

	num = scsi_dma_map(SCpnt);
	if (!num)
		return TRUE;
	else if (num < 0)
		return FALSE;
	else {
		scsi_for_each_sg(SCpnt, sg, num, i) {
			/*
			 * Build nsp32_sglist, substitute sg dma addresses.
			 */
			sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
			sgt[i].len  = cpu_to_le32(sg_dma_len(sg));

			/*
			 * NOTE(review): le32_to_cpu() yields a u32 but the
			 * format string uses %lx (expects unsigned long) —
			 * printk format mismatch on 64-bit; confirm and fix
			 * separately.
			 */
			if (le32_to_cpu(sgt[i].len) > 0x10000) {
				nsp32_msg(KERN_ERR,
					"can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
				return FALSE;
			}
			nsp32_dbg(NSP32_DEBUG_SGLIST,
				  "num 0x%x : addr 0x%lx len 0x%lx",
				  i,
				  le32_to_cpu(sgt[i].addr),
				  le32_to_cpu(sgt[i].len ));
		}

		/* set end mark */
		l = le32_to_cpu(sgt[num-1].len);
		sgt[num-1].len = cpu_to_le32(l | SGTEND);
	}

	return TRUE;
}

/*
 * nsp32_queuecommand_lck - queue one SCSI command (mid-layer entry,
 * invoked with the host lock held via DEF_SCSI_QCMD below).
 *
 * Validates the target/LUN, records the command as the single in-flight
 * command (can_queue == 1), builds the SG table and outgoing IDENTIFY
 * (plus SDTR when sync negotiation is still pending), then starts
 * selection via auto-parameter or auto-SCSI mode.  Always returns 0;
 * failures are reported through SCpnt->result + done()/nsp32_scsi_done().
 */
static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target *target;
	nsp32_lunt   *cur_lunt;
	int ret;

	nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
		  "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
		  "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
		  SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
		  scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));

	if (data->CurrentSC != NULL) {
		/* only one command may be in flight (can_queue == 1) */
		nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
		data->CurrentSC = NULL;
		SCpnt->result   = DID_NO_CONNECT << 16;
		done(SCpnt);
		return 0;
	}

	/* check target ID is not same as this initiator ID */
	if (scmd_id(SCpnt) == SCpnt->device->host->this_id) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "terget==host???");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	/* check target LUN is allowable value */
	if (SCpnt->device->lun >= MAX_LUN) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	show_command(SCpnt);

	SCpnt->scsi_done     = done;
	data->CurrentSC      = SCpnt;
	SCpnt->SCp.Status    = CHECK_CONDITION;
	SCpnt->SCp.Message   = 0;
	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));

	SCpnt->SCp.ptr		    = (char *)scsi_sglist(SCpnt);
	SCpnt->SCp.this_residual    = scsi_bufflen(SCpnt);
	SCpnt->SCp.buffer	    = NULL;
	SCpnt->SCp.buffers_residual = 0;

	/* initialize data */
	data->msgout_len	= 0;
	data->msgin_len		= 0;
	cur_lunt		= &(data->lunt[SCpnt->device->id][SCpnt->device->lun]);
	cur_lunt->SCpnt		= SCpnt;
	cur_lunt->save_datp	= 0;
	cur_lunt->msgin03	= FALSE;
	data->cur_lunt		= cur_lunt;
	data->cur_id		= SCpnt->device->id;
	data->cur_lun		= SCpnt->device->lun;

	ret = nsp32_setup_sg_table(SCpnt);
	if (ret == FALSE) {
		nsp32_msg(KERN_ERR, "SGT fail");
		SCpnt->result = DID_ERROR << 16;
		nsp32_scsi_done(SCpnt);
		return 0;
	}

	/* Build IDENTIFY */
	nsp32_build_identify(SCpnt);

	/*
	 * If target is the first time to transfer after the reset
	 * (target don't have SDTR_DONE and SDTR_INITIATOR), sync
	 * message SDTR is needed to do synchronous transfer.
	 */
	target = &data->target[scmd_id(SCpnt)];
	data->cur_target = target;

	if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) {
		unsigned char period, offset;

		if (trans_mode != ASYNC_MODE) {
			nsp32_set_max_sync(data, target, &period, &offset);
			nsp32_build_sdtr(SCpnt, period, offset);
			target->sync_flag |= SDTR_INITIATOR;
		} else {
			nsp32_set_async(data, target);
			target->sync_flag |= SDTR_DONE;
		}

		/*
		 * NOTE(review): in the ASYNC_MODE branch above, period and
		 * offset are never assigned, so this debug print reads
		 * uninitialized locals (debug builds only) — confirm.
		 */
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR: entry: %d start_period: 0x%x offset: 0x%x\n",
			  target->limit_entry, period, offset);
	} else if (target->sync_flag & SDTR_INITIATOR) {
		/*
		 * It was negotiating SDTR with target, sending from the
		 * initiator, but there are no chance to remove this flag.
		 * Set async because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_INITIATOR;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR_INITIATOR: fall back to async");
	} else if (target->sync_flag & SDTR_TARGET) {
		/*
		 * It was negotiating SDTR with target, sending from target,
		 * but there are no chance to remove this flag.  Set async
		 * because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_TARGET;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "Unknown SDTR from target is reached, fall back to async.");
	}

	nsp32_dbg(NSP32_DEBUG_TARGETFLAG,
		  "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x",
		  SCpnt->device->id, target->sync_flag, target->syncreg,
		  target->ackwidth);

	/* Selection */
	if (auto_param == 0) {
		ret = nsp32_selection_autopara(SCpnt);
	} else {
		ret = nsp32_selection_autoscsi(SCpnt);
	}

	if (ret != TRUE) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail");
		nsp32_scsi_done(SCpnt);
	}

	return 0;
}

static DEF_SCSI_QCMD(nsp32_queuecommand)

/*
 * initialize asic
 *
 * Brings the chip to a known state: fixes the PCI latency timer if the
 * BIOS left it at 0, masks and drains all interrupts, programs FIFO
 * thresholds for the selected transfer method, clock divider, bus-master
 * cycle mode, MISC behavior bits, termination power, timers and the IRQ
 * select mask, then unmasks interrupts and turns the LED off.
 */
static int nsp32hw_init(nsp32_hw_data *data)
{
	unsigned int   base = data->BaseAddress;
	unsigned short irq_stat;
	unsigned long  lc_reg;
	unsigned char  power;

	/* if the latency timer byte is zero, give it a sane default (0x20) */
	lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE);
	if ((lc_reg & 0xff00) == 0) {
		lc_reg |= (0x20 << 8);
		nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
	}

	nsp32_write2(base, IRQ_CONTROL,        IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_write2(base, TRANSFER_CONTROL,   0);
	nsp32_write4(base, BM_CNT,             0);
	nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

	/* drain any pending interrupt status before enabling anything */
	do {
		irq_stat = nsp32_read2(base, IRQ_STATUS);
		nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat);
	} while (irq_stat & IRQSTATUS_ANY_IRQ);

	/*
	 * Fill FIFO_FULL_SHLD, FIFO_EMPTY_SHLD. Below parameter is
	 *  designated by specification.
	 */
	if ((data->trans_method & NSP32_TRANSFER_PIO) ||
	    (data->trans_method & NSP32_TRANSFER_MMIO)) {
		/* transfer mode: PIO or MMIO */
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT,  0x40);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40);
	} else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		/* transfer mode: bus master */
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT,  0x10);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60);
	} else {
		nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode");
	}

	nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x",
		  nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT),
		  nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));

	nsp32_index_write1(base, CLOCK_DIV, data->clock);
	nsp32_index_write1(base, BM_CYCLE,  MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
	nsp32_write1(base, PARITY_CONTROL, 0);	/* parity check is disable */

	/*
	 * initialize MISC_WRRD register
	 *
	 * Note: Designated parameters is obeyed as following:
	 *	MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
	 *	MISC_MASTER_TERMINATION_SELECT:      It must be set.
	 *	MISC_BMREQ_NEGATE_TIMING_SEL:	     It should be set.
	 *	MISC_AUTOSEL_TIMING_SEL:	     It should be set.
	 *	MISC_BMSTOP_CHANGE2_NONDATA_PHASE:   It should be set.
	 *	MISC_DELAYED_BMSTART:		     It's selected for safety.
	 *
	 * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then
	 *	we have to set TRANSFERCONTROL_BM_START as 0 and set
	 *	appropriate value before restarting bus master transfer.
	 */
	nsp32_index_write2(base, MISC_WR,
			   (SCSI_DIRECTION_DETECTOR_SELECT |
			    DELAYED_BMSTART                |
			    MASTER_TERMINATION_SELECT      |
			    BMREQ_NEGATE_TIMING_SEL        |
			    AUTOSEL_TIMING_SEL             |
			    BMSTOP_CHANGE2_NONDATA_PHASE));

	nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
	power = nsp32_index_read1(base, TERM_PWR_CONTROL);
	if (!(power & SENSE)) {
		nsp32_msg(KERN_INFO, "term power on");
		nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR);
	}

	nsp32_write2(base, TIMER_SET, TIMER_STOP);
	nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */

	nsp32_write1(base, SYNC_REG,     0);
	nsp32_write1(base, ACK_WIDTH,    0);
	nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * enable to select designated IRQ (except for
	 * IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
	 */
	nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ         |
					     IRQSELECT_SCSIRESET_IRQ     |
					     IRQSELECT_FIFO_SHLD_IRQ     |
					     IRQSELECT_RESELECT_IRQ      |
					     IRQSELECT_PHASE_CHANGE_IRQ  |
					     IRQSELECT_AUTO_SCSI_SEQ_IRQ |
					  //   IRQSELECT_BMCNTERR_IRQ      |
					     IRQSELECT_TARGET_ABORT_IRQ  |
					     IRQSELECT_MASTER_ABORT_IRQ );
	nsp32_write2(base, IRQ_CONTROL, 0);

	/* PCI LED off */
	nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF);
	nsp32_index_write1(base, EXT_PORT,     LED_OFF);

	return TRUE;
}


/*
 * interrupt routine
 *
 * Dispatches on IRQ_STATUS under the host lock: card removal
 * (all-ones reads), timer, external bus reset, AutoSCSI completion
 * (SCSI_EXECUTE_PHASE flags), FIFO threshold, phase change, PCI and
 * bus-master-count errors.  Re-enables the chip IRQ mask on exit.
 */
static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
{
	nsp32_hw_data *data = dev_id;
	unsigned int base = data->BaseAddress;
	struct scsi_cmnd *SCpnt = data->CurrentSC;
	unsigned short auto_stat, irq_stat, trans_stat;
	unsigned char busmon, busphase;
	unsigned long flags;
	int ret;
	int handled = 0;
	struct Scsi_Host *host = data->Host;

	spin_lock_irqsave(host->host_lock, flags);

	/*
	 * IRQ check, then enable IRQ mask
	 */
	irq_stat = nsp32_read2(base, IRQ_STATUS);
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
	/* is this interrupt comes from Ninja asic? */
	if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
		nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
		goto out2;
	}
	handled = 1;
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);

	busmon = nsp32_read1(base, SCSI_BUS_MONITOR);
	busphase = busmon & BUSMON_PHASE_MASK;

	trans_stat = nsp32_read2(base, TRANSFER_STATUS);
	/* all-ones from both registers means the card is gone */
	if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) {
		nsp32_msg(KERN_INFO, "card disconnect");
		if (data->CurrentSC != NULL) {
			nsp32_msg(KERN_INFO, "clean up current SCSI command");
			SCpnt->result = DID_BAD_TARGET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	/* Timer IRQ */
	if (irq_stat & IRQSTATUS_TIMER_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "timer stop");
		nsp32_write2(base, TIMER_SET, TIMER_STOP);
		goto out;
	}

	/* SCSI reset */
	if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) {
		nsp32_msg(KERN_INFO, "detected someone do bus reset");
		nsp32_do_bus_reset(data);
		if (SCpnt != NULL) {
			SCpnt->result = DID_RESET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	if (SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
		nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
		goto out;
	}

	/*
	 * AutoSCSI Interrupt.
	 * Note: This interrupt is occurred when AutoSCSI is finished. Then
	 * check SCSIEXECUTEPHASE, and do appropriate action. Each phases are
	 * recorded when AutoSCSI sequencer has been processed.
	 */
	if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) {
		/* getting SCSI executed phase */
		auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE);
		nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

		/* Selection Timeout, go busfree phase. */
		if (auto_stat & SELECTION_TIMEOUT) {
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "selection timeout occurred");

			SCpnt->result = DID_TIME_OUT << 16;
			nsp32_scsi_done(SCpnt);
			goto out;
		}

		if (auto_stat & MSGOUT_PHASE) {
			/*
			 * MsgOut phase was processed.
			 * If MSG_IN_OCCUER is not set, then MsgOut phase is
			 * completed. Thus, msgout_len must reset.  Otherwise,
			 * nothing to do here. If MSG_OUT_OCCUER is occurred,
			 * then we will encounter the condition and check.
			 */
			if (!(auto_stat & MSG_IN_OCCUER) &&
			     (data->msgout_len <= 3)) {
				/*
				 * !MSG_IN_OCCUER && msgout_len <=3
				 *   ---> AutoSCSI with MSGOUTreg is processed.
				 */
				data->msgout_len = 0;
			}; /* NOTE(review): stray semicolon, harmless */

			nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
		}

		if ((auto_stat & DATA_IN_PHASE) &&
		    (scsi_get_resid(SCpnt) > 0) &&
		    ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
			printk( "auto+fifo\n");
			//nsp32_pio_read(SCpnt);
		}

		if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) {
			/* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "Data in/out phase processed");

			/* read BMCNT, SGT pointer addr */
			nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
				    nsp32_read4(base, BM_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
				    nsp32_read4(base, SGT_ADR));
			nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
				    nsp32_read4(base, SACK_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
				    nsp32_read4(base, SAVED_SACK_CNT));

			scsi_set_resid(SCpnt, 0); /* all data transferred! */
		}

		/*
		 * MsgIn Occur
		 */
		if (auto_stat & MSG_IN_OCCUER) {
			nsp32_msgin_occur(SCpnt, irq_stat, auto_stat);
		}

		/*
		 * MsgOut Occur
		 */
		if (auto_stat & MSG_OUT_OCCUER) {
			nsp32_msgout_occur(SCpnt);
		}

		/*
		 * Bus Free Occur
		 */
		if (auto_stat & BUS_FREE_OCCUER) {
			ret = nsp32_busfree_occur(SCpnt, auto_stat);
			if (ret == TRUE) {
				goto out;
			}
		}

		if (auto_stat & STATUS_PHASE) {
			/*
			 * Read CSB and substitute CSB for SCpnt->result
			 * to save status phase stutas byte.
			 * scsi error handler checks host_byte (DID_*:
			 * low level driver to indicate status), then checks
			 * status_byte (SCSI status byte).
			 */
			SCpnt->result =	(int)nsp32_read1(base, SCSI_CSB_IN);
		}

		if (auto_stat & ILLEGAL_PHASE) {
			/* Illegal phase is detected. SACK is not back. */
			nsp32_msg(KERN_WARNING,
				  "AUTO SCSI ILLEGAL PHASE OCCUR!!!!");

			/* TODO: currently we don't have any action... bus reset? */

			/*
			 * To send back SACK, assert, wait, and negate.
			 */
			nsp32_sack_assert(data);
			nsp32_wait_req(data, NEGATE);
			nsp32_sack_negate(data);

		}

		if (auto_stat & COMMAND_PHASE) {
			/* nothing to do */
			nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed");
		}

		if (auto_stat & AUTOSCSI_BUSY) {
			/* AutoSCSI is running */
		}

		show_autophase(auto_stat);
	}

	/* FIFO_SHLD_IRQ */
	if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ");

		switch(busphase) {
		case BUSPHASE_DATA_OUT:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write");

			//nsp32_pio_write(SCpnt);

			break;

		case BUSPHASE_DATA_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read");

			//nsp32_pio_read(SCpnt);

			break;

		case BUSPHASE_STATUS:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");

			SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);

			break;
		default:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
			nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}

		goto out;
	}

	/* Phase Change IRQ */
	if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ");

		switch(busphase) {
		case BUSPHASE_MESSAGE_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in");
			nsp32_msgin_occur(SCpnt, irq_stat, 0);
			break;
		default:
			nsp32_msg(KERN_WARNING, "phase chg/other phase?");
			nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n",
				  irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}
		goto out;
	}

	/* PCI_IRQ */
	if (irq_stat & IRQSTATUS_PCI_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred");
		/* Do nothing */
	}

	/* BMCNTERR_IRQ */
	if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) {
		nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! ");
		/*
		 * TODO: To be implemented improving bus master
		 * transfer reliability when BMCNTERR is occurred in
		 * AutoSCSI phase described in specification.
		 */
	}

#if 0
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
	show_busphase(busphase);
#endif

 out:
	/* disable IRQ mask */
	nsp32_write2(base, IRQ_CONTROL, 0);

 out2:
	spin_unlock_irqrestore(host->host_lock, flags);

	nsp32_dbg(NSP32_DEBUG_INTR, "exit");

	return IRQ_RETVAL(handled);
}

#undef SPRINTF
#define SPRINTF(args...) seq_printf(m, ##args)

/*
 * nsp32_show_info - /proc (seq_file) status dump: driver/host info,
 * I/O and MMIO ranges, chip revision, current command pointer and the
 * per-target SDTR negotiation state.
 */
static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	unsigned long     flags;
	nsp32_hw_data    *data;
	int               hostno;
	unsigned int      base;
	unsigned char     mode_reg;
	int               id, speed;
	long              model;

	hostno = host->host_no;
	data = (nsp32_hw_data *)host->hostdata;
	base = host->io_port;

	SPRINTF("NinjaSCSI-32 status\n\n");
	SPRINTF("Driver version:        %s, $Revision: 1.33 $\n", nsp32_release_version);
	SPRINTF("SCSI host No.:         %d\n",		hostno);
	SPRINTF("IRQ:                   %d\n",		host->irq);
	SPRINTF("IO:                    0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
	SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n",	host->base, host->base + data->MmioLength - 1);
	SPRINTF("sg_tablesize:          %d\n",		host->sg_tablesize);
	SPRINTF("Chip revision:         0x%x\n",       	(nsp32_read2(base, INDEX_REG) >> 8) & 0xff);

	mode_reg = nsp32_index_read1(base, CHIP_MODE);
	model    = data->pci_devid->driver_data;

#ifdef CONFIG_PM
	SPRINTF("Power Management:      %s\n",          (mode_reg & OPTF) ? "yes" : "no");
#endif
	SPRINTF("OEM:                   %ld, %s\n",     (mode_reg & (OEM0|OEM1)), nsp32_model[model]);

	spin_lock_irqsave(&(data->Lock), flags);
	SPRINTF("CurrentSC:             0x%p\n\n",      data->CurrentSC);
	spin_unlock_irqrestore(&(data->Lock), flags);


	SPRINTF("SDTR status\n");
	for (id = 0; id < ARRAY_SIZE(data->target); id++) {

		SPRINTF("id %d: ", id);

		if (id == host->this_id) {
			SPRINTF("----- NinjaSCSI-32 host adapter\n");
			continue;
		}

		if (data->target[id].sync_flag == SDTR_DONE) {
			if (data->target[id].period == 0            &&
			    data->target[id].offset == ASYNC_OFFSET ) {
				SPRINTF("async");
			} else {
				SPRINTF(" sync");
			}
		} else {
			SPRINTF(" none");
		}

		if (data->target[id].period != 0) {

			speed = 1000000 / (data->target[id].period * 4);

			SPRINTF(" transfer %d.%dMB/s, offset %d",
				speed / 1000,
				speed % 1000,
				data->target[id].offset
				);
		}
		SPRINTF("\n");
	}
	return 0;
}
#undef SPRINTF

/*
 * Reset parameters and call scsi_done for data->cur_lunt.
 * Be careful setting SCpnt->result = DID_* before calling this function.
 */
static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	scsi_dma_unmap(SCpnt);

	/*
	 * clear TRANSFERCONTROL_BM_START
	 */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT,           0);

	/*
	 * call scsi_done
	 */
	(*SCpnt->scsi_done)(SCpnt);

	/*
	 * reset parameters
	 */
	data->cur_lunt->SCpnt = NULL;
	data->cur_lunt	      = NULL;
	data->cur_target      = NULL;
	data->CurrentSC	      = NULL;
}


/*
 * Bus Free Occur
 *
 * Current Phase is BUSFREE. AutoSCSI is automatically execute BUSFREE phase
 * with ACK reply when below condition is matched:
 *	MsgIn 00: Command Complete.
 *	MsgIn 02: Save Data Pointer.
 *	MsgIn 04: Disconnect.
 * In other case, unexpected BUSFREE is detected.
*/ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph); show_autophase(execph); nsp32_write4(base, BM_CNT, 0); nsp32_write2(base, TRANSFER_CONTROL, 0); /* * MsgIn 02: Save Data Pointer * * VALID: * Save Data Pointer is received. Adjust pointer. * * NO-VALID: * SCSI-3 says if Save Data Pointer is not received, then we restart * processing and we can't adjust any SCSI data pointer in next data * phase. */ if (execph & MSGIN_02_VALID) { nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid"); /* * Check sack_cnt/saved_sack_cnt, then adjust sg table if * needed. */ if (!(execph & MSGIN_00_VALID) && ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) { unsigned int sacklen, s_sacklen; /* * Read SACK count and SAVEDSACK count, then compare. */ sacklen = nsp32_read4(base, SACK_CNT ); s_sacklen = nsp32_read4(base, SAVED_SACK_CNT); /* * If SAVEDSACKCNT == 0, it means SavedDataPointer is * come after data transferring. */ if (s_sacklen > 0) { /* * Comparing between sack and savedsack to * check the condition of AutoMsgIn03. * * If they are same, set msgin03 == TRUE, * COMMANDCONTROL_AUTO_MSGIN_03 is enabled at * reselection. On the other hand, if they * aren't same, set msgin03 == FALSE, and * COMMANDCONTROL_AUTO_MSGIN_03 is disabled at * reselection. */ if (sacklen != s_sacklen) { data->cur_lunt->msgin03 = FALSE; } else { data->cur_lunt->msgin03 = TRUE; } nsp32_adjust_busfree(SCpnt, s_sacklen); } } /* This value has not substitude with valid value yet... */ //data->cur_lunt->save_datp = data->cur_datp; } else { /* * no processing. */ } if (execph & MSGIN_03_VALID) { /* MsgIn03 was valid to be processed. No need processing. 
*/ } /* * target SDTR check */ if (data->cur_target->sync_flag & SDTR_INITIATOR) { /* * SDTR negotiation pulled by the initiator has not * finished yet. Fall back to ASYNC mode. */ nsp32_set_async(data, data->cur_target); data->cur_target->sync_flag &= ~SDTR_INITIATOR; data->cur_target->sync_flag |= SDTR_DONE; } else if (data->cur_target->sync_flag & SDTR_TARGET) { /* * SDTR negotiation pulled by the target has been * negotiating. */ if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) { /* * If valid message is received, then * negotiation is succeeded. */ } else { /* * On the contrary, if unexpected bus free is * occurred, then negotiation is failed. Fall * back to ASYNC mode. */ nsp32_set_async(data, data->cur_target); } data->cur_target->sync_flag &= ~SDTR_TARGET; data->cur_target->sync_flag |= SDTR_DONE; } /* * It is always ensured by SCSI standard that initiator * switches into Bus Free Phase after * receiving message 00 (Command Complete), 04 (Disconnect). * It's the reason that processing here is valid. */ if (execph & MSGIN_00_VALID) { /* MsgIn 00: Command Complete */ nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete"); SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN); SCpnt->SCp.Message = 0; nsp32_dbg(NSP32_DEBUG_BUSFREE, "normal end stat=0x%x resid=0x%x\n", SCpnt->SCp.Status, scsi_get_resid(SCpnt)); SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0); nsp32_scsi_done(SCpnt); /* All operation is done */ return TRUE; } else if (execph & MSGIN_04_VALID) { /* MsgIn 04: Disconnect */ SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN); SCpnt->SCp.Message = 4; nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect"); return TRUE; } else { /* Unexpected bus free */ nsp32_msg(KERN_WARNING, "unexpected bus free occurred"); /* DID_ERROR? 
*/ //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0); SCpnt->result = DID_ERROR << 16; nsp32_scsi_done(SCpnt); return TRUE; } return FALSE; } /* * nsp32_adjust_busfree - adjusting SG table * * Note: This driver adjust the SG table using SCSI ACK * counter instead of BMCNT counter! */ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; int old_entry = data->cur_entry; int new_entry; int sg_num = data->cur_lunt->sg_num; nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt; unsigned int restlen, sentlen; u32_le len, addr; nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt)); /* adjust saved SACK count with 4 byte start address boundary */ s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3; /* * calculate new_entry from sack count and each sgt[].len * calculate the byte which is intent to send */ sentlen = 0; for (new_entry = old_entry; new_entry < sg_num; new_entry++) { sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND); if (sentlen > s_sacklen) { break; } } /* all sgt is processed */ if (new_entry == sg_num) { goto last; } if (sentlen == s_sacklen) { /* XXX: confirm it's ok or not */ /* In this case, it's ok because we are at the head element of the sg. restlen is correctly calculated. 
*/ } /* calculate the rest length for transferring */ restlen = sentlen - s_sacklen; /* update adjusting current SG table entry */ len = le32_to_cpu(sgt[new_entry].len); addr = le32_to_cpu(sgt[new_entry].addr); addr += (len - restlen); sgt[new_entry].addr = cpu_to_le32(addr); sgt[new_entry].len = cpu_to_le32(restlen); /* set cur_entry with new_entry */ data->cur_entry = new_entry; return; last: if (scsi_get_resid(SCpnt) < sentlen) { nsp32_msg(KERN_ERR, "resid underflow"); } scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen); nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt)); /* update hostdata and lun */ return; } /* * It's called MsgOut phase occur. * NinjaSCSI-32Bi/UDE automatically processes up to 3 messages in * message out phase. It, however, has more than 3 messages, * HBA creates the interrupt and we have to process by hand. */ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; //unsigned short command; long new_sgtp; int i; nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "enter: msgout_len: 0x%x", data->msgout_len); /* * If MsgOut phase is occurred without having any * message, then No_Operation is sent (SCSI-2). */ if (data->msgout_len == 0) { nsp32_build_nop(SCpnt); } /* * Set SGTP ADDR current entry for restarting AUTOSCSI, * because SGTP is incremented next point. * There is few statement in the specification... */ new_sgtp = data->cur_lunt->sglun_paddr + (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable)); /* * send messages */ for (i = 0; i < data->msgout_len; i++) { nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "%d : 0x%x", i, data->msgoutbuf[i]); /* * Check REQ is asserted. */ nsp32_wait_req(data, ASSERT); if (i == (data->msgout_len - 1)) { /* * If the last message, set the AutoSCSI restart * before send back the ack message. AutoSCSI * restart automatically negate ATN signal. 
*/ //command = (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02); //nsp32_restart_autoscsi(SCpnt, command); nsp32_write2(base, COMMAND_CONTROL, (CLEAR_CDB_FIFO_POINTER | AUTO_COMMAND_PHASE | AUTOSCSI_RESTART | AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02 )); } /* * Write data with SACK, then wait sack is * automatically negated. */ nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]); nsp32_wait_sack(data, NEGATE); nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n", nsp32_read1(base, SCSI_BUS_MONITOR)); }; data->msgout_len = 0; nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit"); } /* * Restart AutoSCSI * * Note: Restarting AutoSCSI needs set: * SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL */ static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = data->BaseAddress; unsigned short transfer = 0; nsp32_dbg(NSP32_DEBUG_RESTART, "enter"); if (data->cur_target == NULL || data->cur_lunt == NULL) { nsp32_msg(KERN_ERR, "Target or Lun is invalid"); } /* * set SYNC_REG * Don't set BM_START_ADR before setting this register. */ nsp32_write1(base, SYNC_REG, data->cur_target->syncreg); /* * set ACKWIDTH */ nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth); /* * set SREQ hazard killer sampling rate */ nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg); /* * set SGT ADDR (physical address) */ nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr); /* * set TRANSFER CONTROL REG */ transfer = 0; transfer |= (TRANSFER_GO | ALL_COUNTER_CLR); if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { if (scsi_bufflen(SCpnt) > 0) { transfer |= BM_START; } } else if (data->trans_method & NSP32_TRANSFER_MMIO) { transfer |= CB_MMIO_MODE; } else if (data->trans_method & NSP32_TRANSFER_PIO) { transfer |= CB_IO_MODE; } nsp32_write2(base, TRANSFER_CONTROL, transfer); /* * restart AutoSCSI * * TODO: COMMANDCONTROL_AUTO_COMMAND_PHASE is needed ? 
*/ command |= (CLEAR_CDB_FIFO_POINTER | AUTO_COMMAND_PHASE | AUTOSCSI_RESTART ); nsp32_write2(base, COMMAND_CONTROL, command); nsp32_dbg(NSP32_DEBUG_RESTART, "exit"); } /* * cannot run automatically message in occur */ static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt, unsigned long irq_status, unsigned short execph) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; unsigned char msg; unsigned char msgtype; unsigned char newlun; unsigned short command = 0; int msgclear = TRUE; long new_sgtp; int ret; /* * read first message * Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the procedure * of Message-In have to be processed before sending back SCSI ACK. */ msg = nsp32_read1(base, SCSI_DATA_IN); data->msginbuf[(unsigned char)data->msgin_len] = msg; msgtype = data->msginbuf[0]; nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x", data->msgin_len, msg, msgtype); /* * TODO: We need checking whether bus phase is message in? */ /* * assert SCSI ACK */ nsp32_sack_assert(data); /* * processing IDENTIFY */ if (msgtype & 0x80) { if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) { /* Invalid (non reselect) phase */ goto reject; } newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */ ret = nsp32_reselection(SCpnt, newlun); if (ret == TRUE) { goto restart; } else { goto reject; } } /* * processing messages except for IDENTIFY * * TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO. */ switch (msgtype) { /* * 1-byte message */ case COMMAND_COMPLETE: case DISCONNECT: /* * These messages should not be occurred. * They should be processed on AutoSCSI sequencer. */ nsp32_msg(KERN_WARNING, "unexpected message of AutoSCSI MsgIn: 0x%x", msg); break; case RESTORE_POINTERS: /* * AutoMsgIn03 is disabled, and HBA gets this message. 
*/ if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) { unsigned int s_sacklen; s_sacklen = nsp32_read4(base, SAVED_SACK_CNT); if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) { nsp32_adjust_busfree(SCpnt, s_sacklen); } else { /* No need to rewrite SGT */ } } data->cur_lunt->msgin03 = FALSE; /* Update with the new value */ /* reset SACK/SavedACK counter (or ALL clear?) */ nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK); /* * set new sg pointer */ new_sgtp = data->cur_lunt->sglun_paddr + (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable)); nsp32_write4(base, SGT_ADR, new_sgtp); break; case SAVE_POINTERS: /* * These messages should not be occurred. * They should be processed on AutoSCSI sequencer. */ nsp32_msg (KERN_WARNING, "unexpected message of AutoSCSI MsgIn: SAVE_POINTERS"); break; case MESSAGE_REJECT: /* If previous message_out is sending SDTR, and get message_reject from target, SDTR negotiation is failed */ if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) { /* * Current target is negotiating SDTR, but it's * failed. Fall back to async transfer mode, and set * SDTR_DONE. */ nsp32_set_async(data, data->cur_target); data->cur_target->sync_flag &= ~SDTR_INITIATOR; data->cur_target->sync_flag |= SDTR_DONE; } break; case LINKED_CMD_COMPLETE: case LINKED_FLG_CMD_COMPLETE: /* queue tag is not supported currently */ nsp32_msg (KERN_WARNING, "unsupported message: 0x%x", msgtype); break; case INITIATE_RECOVERY: /* staring ECA (Extended Contingent Allegiance) state. */ /* This message is declined in SPI2 or later. */ goto reject; /* * 2-byte message */ case SIMPLE_QUEUE_TAG: case 0x23: /* * 0x23: Ignore_Wide_Residue is not declared in scsi.h. * No support is needed. */ if (data->msgin_len >= 1) { goto reject; } /* current position is 1-byte of 2 byte */ msgclear = FALSE; break; /* * extended message */ case EXTENDED_MESSAGE: if (data->msgin_len < 1) { /* * Current position does not reach 2-byte * (2-byte is extended message length). 
*/ msgclear = FALSE; break; } if ((data->msginbuf[1] + 1) > data->msgin_len) { /* * Current extended message has msginbuf[1] + 2 * (msgin_len starts counting from 0, so buf[1] + 1). * If current message position is not finished, * continue receiving message. */ msgclear = FALSE; break; } /* * Reach here means regular length of each type of * extended messages. */ switch (data->msginbuf[2]) { case EXTENDED_MODIFY_DATA_POINTER: /* TODO */ goto reject; /* not implemented yet */ break; case EXTENDED_SDTR: /* * Exchange this message between initiator and target. */ if (data->msgin_len != EXTENDED_SDTR_LEN + 1) { /* * received inappropriate message. */ goto reject; break; } nsp32_analyze_sdtr(SCpnt); break; case EXTENDED_EXTENDED_IDENTIFY: /* SCSI-I only, not supported. */ goto reject; /* not implemented yet */ break; case EXTENDED_WDTR: goto reject; /* not implemented yet */ break; default: goto reject; } break; default: goto reject; } restart: if (msgclear == TRUE) { data->msgin_len = 0; /* * If restarting AutoSCSI, but there are some message to out * (msgout_len > 0), set AutoATN, and set SCSIMSGOUT as 0 * (MV_VALID = 0). When commandcontrol is written with * AutoSCSI restart, at the same time MsgOutOccur should be * happened (however, such situation is really possible...?). */ if (data->msgout_len > 0) { nsp32_write4(base, SCSI_MSG_OUT, 0); command |= AUTO_ATN; } /* * restart AutoSCSI * If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed. */ command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02); /* * If current msgin03 is TRUE, then flag on. 
*/ if (data->cur_lunt->msgin03 == TRUE) { command |= AUTO_MSGIN_03; } data->cur_lunt->msgin03 = FALSE; } else { data->msgin_len++; } /* * restart AutoSCSI */ nsp32_restart_autoscsi(SCpnt, command); /* * wait SCSI REQ negate for REQ-ACK handshake */ nsp32_wait_req(data, NEGATE); /* * negate SCSI ACK */ nsp32_sack_negate(data); nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit"); return; reject: nsp32_msg(KERN_WARNING, "invalid or unsupported MessageIn, rejected. " "current msg: 0x%x (len: 0x%x), processing msg: 0x%x", msg, data->msgin_len, msgtype); nsp32_build_reject(SCpnt); data->msgin_len = 0; goto restart; } /* * */ static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; nsp32_target *target = data->cur_target; nsp32_sync_table *synct; unsigned char get_period = data->msginbuf[3]; unsigned char get_offset = data->msginbuf[4]; int entry; int syncnum; nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter"); synct = data->synct; syncnum = data->syncnum; /* * If this inititor sent the SDTR message, then target responds SDTR, * initiator SYNCREG, ACKWIDTH from SDTR parameter. * Messages are not appropriate, then send back reject message. * If initiator did not send the SDTR, but target sends SDTR, * initiator calculator the appropriate parameter and send back SDTR. */ if (target->sync_flag & SDTR_INITIATOR) { /* * Initiator sent SDTR, the target responds and * send back negotiation SDTR. */ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR"); target->sync_flag &= ~SDTR_INITIATOR; target->sync_flag |= SDTR_DONE; /* * offset: */ if (get_offset > SYNC_OFFSET) { /* * Negotiation is failed, the target send back * unexpected offset value. */ goto reject; } if (get_offset == ASYNC_OFFSET) { /* * Negotiation is succeeded, the target want * to fall back into asynchronous transfer mode. */ goto async; } /* * period: * Check whether sync period is too short. If too short, * fall back to async mode. 
If it's ok, then investigate * the received sync period. If sync period is acceptable * between sync table start_period and end_period, then * set this I_T nexus as sent offset and period. * If it's not acceptable, send back reject and fall back * to async mode. */ if (get_period < data->synct[0].period_num) { /* * Negotiation is failed, the target send back * unexpected period value. */ goto reject; } entry = nsp32_search_period_entry(data, target, get_period); if (entry < 0) { /* * Target want to use long period which is not * acceptable NinjaSCSI-32Bi/UDE. */ goto reject; } /* * Set new sync table and offset in this I_T nexus. */ nsp32_set_sync_entry(data, target, entry, get_offset); } else { /* Target send SDTR to initiator. */ nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR"); target->sync_flag |= SDTR_INITIATOR; /* offset: */ if (get_offset > SYNC_OFFSET) { /* send back as SYNC_OFFSET */ get_offset = SYNC_OFFSET; } /* period: */ if (get_period < data->synct[0].period_num) { get_period = data->synct[0].period_num; } entry = nsp32_search_period_entry(data, target, get_period); if (get_offset == ASYNC_OFFSET || entry < 0) { nsp32_set_async(data, target); nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET); } else { nsp32_set_sync_entry(data, target, entry, get_offset); nsp32_build_sdtr(SCpnt, get_period, get_offset); } } target->period = get_period; nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit"); return; reject: /* * If the current message is unacceptable, send back to the target * with reject message. */ nsp32_build_reject(SCpnt); async: nsp32_set_async(data, target); /* set as ASYNC transfer mode */ target->period = 0; nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async"); return; } /* * Search config entry number matched in sync_table from given * target and speed period value. If failed to search, return negative value. 
*/ static int nsp32_search_period_entry(nsp32_hw_data *data, nsp32_target *target, unsigned char period) { int i; if (target->limit_entry >= data->syncnum) { nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!"); target->limit_entry = 0; } for (i = target->limit_entry; i < data->syncnum; i++) { if (period >= data->synct[i].start_period && period <= data->synct[i].end_period) { break; } } /* * Check given period value is over the sync_table value. * If so, return max value. */ if (i == data->syncnum) { i = -1; } return i; } /* * target <-> initiator use ASYNC transfer */ static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target) { unsigned char period = data->synct[target->limit_entry].period_num; target->offset = ASYNC_OFFSET; target->period = 0; target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET); target->ackwidth = 0; target->sample_reg = 0; nsp32_dbg(NSP32_DEBUG_SYNC, "set async"); } /* * target <-> initiator use maximum SYNC transfer */ static void nsp32_set_max_sync(nsp32_hw_data *data, nsp32_target *target, unsigned char *period, unsigned char *offset) { unsigned char period_num, ackwidth; period_num = data->synct[target->limit_entry].period_num; *period = data->synct[target->limit_entry].start_period; ackwidth = data->synct[target->limit_entry].ackwidth; *offset = SYNC_OFFSET; target->syncreg = TO_SYNCREG(period_num, *offset); target->ackwidth = ackwidth; target->offset = *offset; target->sample_reg = 0; /* disable SREQ sampling */ } /* * target <-> initiator use entry number speed */ static void nsp32_set_sync_entry(nsp32_hw_data *data, nsp32_target *target, int entry, unsigned char offset) { unsigned char period, ackwidth, sample_rate; period = data->synct[entry].period_num; ackwidth = data->synct[entry].ackwidth; offset = offset; sample_rate = data->synct[entry].sample_rate; target->syncreg = TO_SYNCREG(period, offset); target->ackwidth = ackwidth; target->offset = offset; target->sample_reg = sample_rate | SAMPLING_ENABLE; 
nsp32_dbg(NSP32_DEBUG_SYNC, "set sync"); } /* * It waits until SCSI REQ becomes assertion or negation state. * * Note: If nsp32_msgin_occur is called, we asserts SCSI ACK. Then * connected target responds SCSI REQ negation. We have to wait * SCSI REQ becomes negation in order to negate SCSI ACK signal for * REQ-ACK handshake. */ static void nsp32_wait_req(nsp32_hw_data *data, int state) { unsigned int base = data->BaseAddress; int wait_time = 0; unsigned char bus, req_bit; if (!((state == ASSERT) || (state == NEGATE))) { nsp32_msg(KERN_ERR, "unknown state designation"); } /* REQ is BIT(5) */ req_bit = (state == ASSERT ? BUSMON_REQ : 0); do { bus = nsp32_read1(base, SCSI_BUS_MONITOR); if ((bus & BUSMON_REQ) == req_bit) { nsp32_dbg(NSP32_DEBUG_WAIT, "wait_time: %d", wait_time); return; } udelay(1); wait_time++; } while (wait_time < REQSACK_TIMEOUT_TIME); nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", req_bit); } /* * It waits until SCSI SACK becomes assertion or negation state. */ static void nsp32_wait_sack(nsp32_hw_data *data, int state) { unsigned int base = data->BaseAddress; int wait_time = 0; unsigned char bus, ack_bit; if (!((state == ASSERT) || (state == NEGATE))) { nsp32_msg(KERN_ERR, "unknown state designation"); } /* ACK is BIT(4) */ ack_bit = (state == ASSERT ? BUSMON_ACK : 0); do { bus = nsp32_read1(base, SCSI_BUS_MONITOR); if ((bus & BUSMON_ACK) == ack_bit) { nsp32_dbg(NSP32_DEBUG_WAIT, "wait_time: %d", wait_time); return; } udelay(1); wait_time++; } while (wait_time < REQSACK_TIMEOUT_TIME); nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", ack_bit); } /* * assert SCSI ACK * * Note: SCSI ACK assertion needs with ACKENB=1, AUTODIRECTION=1. 
*/ static void nsp32_sack_assert(nsp32_hw_data *data) { unsigned int base = data->BaseAddress; unsigned char busctrl; busctrl = nsp32_read1(base, SCSI_BUS_CONTROL); busctrl |= (BUSCTL_ACK | AUTODIRECTION | ACKENB); nsp32_write1(base, SCSI_BUS_CONTROL, busctrl); } /* * negate SCSI ACK */ static void nsp32_sack_negate(nsp32_hw_data *data) { unsigned int base = data->BaseAddress; unsigned char busctrl; busctrl = nsp32_read1(base, SCSI_BUS_CONTROL); busctrl &= ~BUSCTL_ACK; nsp32_write1(base, SCSI_BUS_CONTROL, busctrl); } /* * Note: n_io_port is defined as 0x7f because I/O register port is * assigned as: * 0x800-0x8ff: memory mapped I/O port * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly) * 0xc00-0xfff: CardBus status registers */ static int nsp32_detect(struct pci_dev *pdev) { struct Scsi_Host *host; /* registered host structure */ struct resource *res; nsp32_hw_data *data; int ret; int i, j; nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); /* * register this HBA as SCSI device */ host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data)); if (host == NULL) { nsp32_msg (KERN_ERR, "failed to scsi register"); goto err; } /* * set nsp32_hw_data */ data = (nsp32_hw_data *)host->hostdata; memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data)); host->irq = data->IrqNumber; host->io_port = data->BaseAddress; host->unique_id = data->BaseAddress; host->n_io_port = data->NumAddress; host->base = (unsigned long)data->MmioAddress; data->Host = host; spin_lock_init(&(data->Lock)); data->cur_lunt = NULL; data->cur_target = NULL; /* * Bus master transfer mode is supported currently. */ data->trans_method = NSP32_TRANSFER_BUSMASTER; /* * Set clock div, CLOCK_4 (HBA has own external clock, and * dividing * 100ns/4). * Currently CLOCK_4 has only tested, not for CLOCK_2/PCICLK yet. */ data->clock = CLOCK_4; /* * Select appropriate nsp32_sync_table and set I_CLOCKDIV. */ switch (data->clock) { case CLOCK_4: /* If data->clock is CLOCK_4, then select 40M sync table. 
*/ data->synct = nsp32_sync_table_40M; data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M); break; case CLOCK_2: /* If data->clock is CLOCK_2, then select 20M sync table. */ data->synct = nsp32_sync_table_20M; data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M); break; case PCICLK: /* If data->clock is PCICLK, then select pci sync table. */ data->synct = nsp32_sync_table_pci; data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci); break; default: nsp32_msg(KERN_WARNING, "Invalid clock div is selected, set CLOCK_4."); /* Use default value CLOCK_4 */ data->clock = CLOCK_4; data->synct = nsp32_sync_table_40M; data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M); } /* * setup nsp32_lunt */ /* * setup DMA */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { nsp32_msg (KERN_ERR, "failed to set PCI DMA mask"); goto scsi_unregister; } /* * allocate autoparam DMA resource. */ data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr)); if (data->autoparam == NULL) { nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); goto scsi_unregister; } /* * allocate scatter-gather DMA resource. 
*/ data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE, &(data->sg_paddr)); if (data->sg_list == NULL) { nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); goto free_autoparam; } for (i = 0; i < ARRAY_SIZE(data->lunt); i++) { for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) { int offset = i * ARRAY_SIZE(data->lunt[0]) + j; nsp32_lunt tmp = { .SCpnt = NULL, .save_datp = 0, .msgin03 = FALSE, .sg_num = 0, .cur_entry = 0, .sglun = &(data->sg_list[offset]), .sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)), }; data->lunt[i][j] = tmp; } } /* * setup target */ for (i = 0; i < ARRAY_SIZE(data->target); i++) { nsp32_target *target = &(data->target[i]); target->limit_entry = 0; target->sync_flag = 0; nsp32_set_async(data, target); } /* * EEPROM check */ ret = nsp32_getprom_param(data); if (ret == FALSE) { data->resettime = 3; /* default 3 */ } /* * setup HBA */ nsp32hw_init(data); snprintf(data->info_str, sizeof(data->info_str), "NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x", host->irq, host->io_port, host->n_io_port); /* * SCSI bus reset * * Note: It's important to reset SCSI bus in initialization phase. * NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR when * system is coming up, so SCSI devices connected to HBA is set as * un-asynchronous mode. It brings the merit that this HBA is * ready to start synchronous transfer without any preparation, * but we are difficult to control transfer speed. In addition, * it prevents device transfer speed from effecting EEPROM start-up * SDTR. NinjaSCSI-32Bi/UDE has the feature if EEPROM is set as * Auto Mode, then FAST-10M is selected when SCSI devices are * connected same or more than 4 devices. It should be avoided * depending on this specification. Thus, resetting the SCSI bus * restores all connected SCSI devices to asynchronous mode, then * this driver set SDTR safely later, and we can control all SCSI * device transfer mode. 
*/ nsp32_do_bus_reset(data); ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data); if (ret < 0) { nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 " "SCSI PCI controller. Interrupt: %d", host->irq); goto free_sg_list; } /* * PCI IO register */ res = request_region(host->io_port, host->n_io_port, "nsp32"); if (res == NULL) { nsp32_msg(KERN_ERR, "I/O region 0x%lx+0x%lx is already used", data->BaseAddress, data->NumAddress); goto free_irq; } ret = scsi_add_host(host, &pdev->dev); if (ret) { nsp32_msg(KERN_ERR, "failed to add scsi host"); goto free_region; } scsi_scan_host(host); pci_set_drvdata(pdev, host); return 0; free_region: release_region(host->io_port, host->n_io_port); free_irq: free_irq(host->irq, data); free_sg_list: pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE, data->sg_list, data->sg_paddr); free_autoparam: pci_free_consistent(pdev, sizeof(nsp32_autoparam), data->autoparam, data->auto_paddr); scsi_unregister: scsi_host_put(host); err: return 1; } static int nsp32_release(struct Scsi_Host *host) { nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; if (data->autoparam) { pci_free_consistent(data->Pci, sizeof(nsp32_autoparam), data->autoparam, data->auto_paddr); } if (data->sg_list) { pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE, data->sg_list, data->sg_paddr); } if (host->irq) { free_irq(host->irq, data); } if (host->io_port && host->n_io_port) { release_region(host->io_port, host->n_io_port); } if (data->MmioAddress) { iounmap(data->MmioAddress); } return 0; } static const char *nsp32_info(struct Scsi_Host *shpnt) { nsp32_hw_data *data = (nsp32_hw_data *)shpnt->hostdata; return data->info_str; } /**************************************************************************** * error handler */ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; nsp32_msg(KERN_WARNING, "abort"); if 
(data->cur_lunt->SCpnt == NULL) {
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed");
		return FAILED;
	}

	if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) {
		/* reset SDTR negotiation */
		data->cur_target->sync_flag = 0;
		nsp32_set_async(data, data->cur_target);
	}

	/* stop any in-flight transfer before completing the command */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	/*
	 * NOTE(review): BM_CNT is written with nsp32_write4() elsewhere
	 * (e.g. nsp32_do_bus_reset); this 16-bit write clears only the
	 * low half of the counter — confirm that is intended.
	 */
	nsp32_write2(base, BM_CNT,           0);

	SCpnt->result = DID_ABORT << 16;
	nsp32_scsi_done(SCpnt);
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success");

	return SUCCESS;
}

/*
 * SCSI EH bus-reset handler: masks all HBA interrupts, resets the
 * SCSI bus, then unmasks.  Runs under the host lock.
 */
static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32_msg(KERN_INFO, "Bus Reset");
	/* NOTE(review): %x with a pointer argument; %p would be correct
	 * (debug output only). */
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);

	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* SCSI bus reset is succeeded at any time.
 */
}

/*
 * Hard-reset the SCSI bus and the driver's per-target negotiation
 * state.  Callers are responsible for masking HBA interrupts around
 * this (see nsp32_eh_bus_reset / nsp32_eh_host_reset).
 */
static void nsp32_do_bus_reset(nsp32_hw_data *data)
{
	unsigned int   base = data->BaseAddress;
	unsigned short intrdat;
	int i;

	nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");

	/*
	 * stop all transfer
	 * clear TRANSFERCONTROL_BM_START
	 * clear counter
	 */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT,           0);
	nsp32_write4(base, CLR_COUNTER,      CLRCOUNTER_ALLMASK);

	/*
	 * fall back to asynchronous transfer mode
	 * initialize SDTR negotiation flag
	 */
	for (i = 0; i < ARRAY_SIZE(data->target); i++) {
		nsp32_target *target = &data->target[i];

		target->sync_flag = 0;
		nsp32_set_async(data, target);
	}

	/*
	 * reset SCSI bus
	 */
	nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
	/* hold RST; RESET_HOLD_TIME presumably in usec (mdelay takes ms)
	 * — TODO confirm against the constant's definition */
	mdelay(RESET_HOLD_TIME / 1000);
	nsp32_write1(base, SCSI_BUS_CONTROL, 0);
	for(i = 0; i < 5; i++) {
		intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
	}

	data->CurrentSC = NULL;
}

/*
 * SCSI EH host-reset handler: reinitializes the HBA registers
 * (nsp32hw_init) and then performs a bus reset, all under the host
 * lock with HBA interrupts masked.
 */
static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
{
	struct Scsi_Host *host = SCpnt->device->host;
	unsigned int      base = SCpnt->device->host->io_port;
	nsp32_hw_data    *data = (nsp32_hw_data *)host->hostdata;

	nsp32_msg(KERN_INFO, "Host Reset");
	/* NOTE(review): %x with a pointer argument; %p would be correct
	 * (debug output only). */
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt);

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32hw_init(data);
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* Host reset is succeeded at any time. */
}


/**************************************************************************
 * EEPROM handler
 */

/*
 * getting EEPROM parameter
 *
 * Validates the EEPROM signature bytes (0x55/0xaa at 0x7e/0x7f), then
 * dispatches to the board-specific parser by PCI vendor/device ID.
 * Returns TRUE/FALSE.
 */
static int nsp32_getprom_param(nsp32_hw_data *data)
{
	int vendor = data->pci_devid->vendor;
	int device = data->pci_devid->device;
	int ret, val, i;

	/*
	 * EEPROM checking.
*/ ret = nsp32_prom_read(data, 0x7e); if (ret != 0x55) { nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret); return FALSE; } ret = nsp32_prom_read(data, 0x7f); if (ret != 0xaa) { nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret); return FALSE; } /* * check EEPROM type */ if (vendor == PCI_VENDOR_ID_WORKBIT && device == PCI_DEVICE_ID_WORKBIT_STANDARD) { ret = nsp32_getprom_c16(data); } else if (vendor == PCI_VENDOR_ID_WORKBIT && device == PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC) { ret = nsp32_getprom_at24(data); } else if (vendor == PCI_VENDOR_ID_WORKBIT && device == PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO ) { ret = nsp32_getprom_at24(data); } else { nsp32_msg(KERN_WARNING, "Unknown EEPROM"); ret = FALSE; } /* for debug : SPROM data full checking */ for (i = 0; i <= 0x1f; i++) { val = nsp32_prom_read(data, i); nsp32_dbg(NSP32_DEBUG_EEPROM, "rom address 0x%x : 0x%x", i, val); } return ret; } /* * AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map: * * ROMADDR * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6) * Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M * 0x07 : HBA Synchronous Transfer Period * Value 0: AutoSync, 1: Manual Setting * 0x08 - 0x0f : Not Used? (0x0) * 0x10 : Bus Termination * Value 0: Auto[ON], 1: ON, 2: OFF * 0x11 : Not Used? (0) * 0x12 : Bus Reset Delay Time (0x03) * 0x13 : Bootable CD Support * Value 0: Disable, 1: Enable * 0x14 : Device Scan * Bit 7 6 5 4 3 2 1 0 * | <-----------------> * | SCSI ID: Value 0: Skip, 1: YES * |-> Value 0: ALL scan, Value 1: Manual * 0x15 - 0x1b : Not Used? (0) * 0x1c : Constant? (0x01) (clock div?) * 0x1d - 0x7c : Not Used (0xff) * 0x7d : Not Used? (0xff) * 0x7e : Constant (0x55), Validity signature * 0x7f : Constant (0xaa), Validity signature */ static int nsp32_getprom_at24(nsp32_hw_data *data) { int ret, i; int auto_sync; nsp32_target *target; int entry; /* * Reset time which is designated by EEPROM. * * TODO: Not used yet. 
	 */
	data->resettime = nsp32_prom_read(data, 0x12);

	/*
	 * HBA Synchronous Transfer Period
	 *
	 * Note: auto_sync = 0: auto, 1: manual.  Ninja SCSI HBA spec says
	 * that if auto_sync is 0 (auto), and connected SCSI devices are
	 * same or lower than 3, then transfer speed is set as ULTRA-20M.
	 * On the contrary if connected SCSI devices are same or higher
	 * than 4, then transfer speed is set as FAST-10M.
	 *
	 * I break this rule.  The number of connected SCSI devices are
	 * only ignored.  If auto_sync is 0 (auto), then transfer speed is
	 * forced as ULTRA-20M.
	 */
	ret = nsp32_prom_read(data, 0x07);
	switch (ret) {
	case 0:		/* AutoSync */
		auto_sync = TRUE;
		break;
	case 1:		/* Manual */
		auto_sync = FALSE;
		break;
	default:	/* unknown EEPROM value: safest to auto-negotiate */
		nsp32_msg(KERN_WARNING,
			  "Unsupported Auto Sync mode. Fall back to manual mode.");
		auto_sync = TRUE;
	}

	/* module parameter overrides the EEPROM setting */
	if (trans_mode == ULTRA20M_MODE) {
		auto_sync = TRUE;
	}

	/*
	 * each device Synchronous Transfer Period
	 */
	for (i = 0; i < NSP32_HOST_SCSIID; i++) {
		target = &data->target[i];
		if (auto_sync == TRUE) {
			target->limit_entry = 0;   /* set as ULTRA20M */
		} else {
			ret   = nsp32_prom_read(data, i);
			entry = nsp32_search_period_entry(data, target, ret);
			if (entry < 0) {
				/* search failed... set maximum speed */
				entry = 0;
			}
			target->limit_entry = entry;
		}
	}

	return TRUE;
}

/*
 * C16 110 (I-O Data: SC-NBD) data map:
 *
 *   ROMADDR
 *   0x00 - 0x06 :  Device Synchronous Transfer Period (SCSI ID 0 - 6)
 *			Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
 *   0x07        :  0 (HBA Synchronous Transfer Period: Auto Sync)
 *   0x08 - 0x0f :  Not Used? (0x0)
 *   0x10        :  Transfer Mode
 *			Value 0: PIO, 1: Busmater
 *   0x11        :  Bus Reset Delay Time (0x00-0x20)
 *   0x12        :  Bus Termination
 *			Value 0: Disable, 1: Enable
 *   0x13 - 0x19 :  Disconnection
 *			Value 0: Disable, 1: Enable
 *   0x1a - 0x7c :  Not Used? (0)
 *   0x7d        :  Not Used?
 (0xf8)
 *   0x7e        :  Constant (0x55), Validity signature
 *   0x7f        :  Constant (0xaa), Validity signature
 */
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
	int           ret, i;
	nsp32_target *target;
	int           entry, val;

	/*
	 * Reset time which is designated by EEPROM.
	 *
	 * TODO: Not used yet.
	 */
	data->resettime = nsp32_prom_read(data, 0x11);

	/*
	 * each device Synchronous Transfer Period
	 */
	for (i = 0; i < NSP32_HOST_SCSIID; i++) {
		target = &data->target[i];
		ret = nsp32_prom_read(data, i);
		/* map EEPROM speed code to a period register value */
		switch (ret) {
		case 0:		/* 20MB/s */
			val = 0x0c;
			break;
		case 1:		/* 10MB/s */
			val = 0x19;
			break;
		case 2:		/* 5MB/s */
			val = 0x32;
			break;
		case 3:		/* ASYNC */
			val = 0x00;
			break;
		default:	/* default 20MB/s */
			val = 0x0c;
			break;
		}
		entry = nsp32_search_period_entry(data, target, val);
		if (entry < 0 || trans_mode == ULTRA20M_MODE) {
			/* search failed... set maximum speed */
			entry = 0;
		}
		target->limit_entry = entry;
	}

	return TRUE;
}

/*
 * Atmel AT24C01A (driven in 5V) serial EEPROM routines
 *
 * Reads one byte at 'romaddr' via bit-banged I2C: dummy write to latch
 * the word address, repeated start, then a single-byte read.
 */
static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
{
	int i, val;

	/* start condition */
	nsp32_prom_start(data);

	/* device address */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 0);	/* A2: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/* A1: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/* A0: 0 (GND) */

	/* R/W: W for dummy write */
	nsp32_prom_write_bit(data, 0);

	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* word address */
	for (i = 7; i >= 0; i--) {
		nsp32_prom_write_bit(data, ((romaddr >> i) & 1));
	}

	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* start condition */
	nsp32_prom_start(data);

	/* device address */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 1);	/* 1 */
	nsp32_prom_write_bit(data, 0);	/* 0 */
	nsp32_prom_write_bit(data, 0);	/* A2: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/* A1: 0 (GND) */
	nsp32_prom_write_bit(data, 0);	/*
 A0: 0 (GND) */

	/* R/W: R */
	nsp32_prom_write_bit(data, 1);

	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* data... MSB first */
	val = 0;
	for (i = 7; i >= 0; i--) {
		val += (nsp32_prom_read_bit(data) << i);
	}

	/* no ack */
	nsp32_prom_write_bit(data, 1);

	/* stop condition */
	nsp32_prom_stop(data);

	return val;
}

/*
 * Drive one line (SCL/SDA/ENA) of the serial-ROM control register to
 * 'val' with a read-modify-write, then settle for 10us.
 */
static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val)
{
	int base = data->BaseAddress;
	int tmp;

	tmp = nsp32_index_read1(base, SERIAL_ROM_CTL);

	if (val == 0) {
		tmp &= ~bit;
	} else {
		tmp |=  bit;
	}

	nsp32_index_write1(base, SERIAL_ROM_CTL, tmp);

	udelay(10);
}

/*
 * Sample one line of the serial-ROM control register; only SDA is a
 * valid input here.  Returns 0 or 1.
 */
static int nsp32_prom_get(nsp32_hw_data *data, int bit)
{
	int base = data->BaseAddress;
	int tmp, ret;

	if (bit != SDA) {
		nsp32_msg(KERN_ERR, "return value is not appropriate");
		return 0;
	}

	tmp = nsp32_index_read1(base, SERIAL_ROM_CTL) & bit;

	if (tmp == 0) {
		ret = 0;
	} else {
		ret = 1;
	}

	udelay(10);

	return ret;
}

/* I2C START: SDA falls while SCL is high */
static void nsp32_prom_start (nsp32_hw_data *data)
{
	/* start condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 1);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 0);	/* keeping SCL=1 and transiting
					 * SDA 1->0 is start condition */
	nsp32_prom_set(data, SCL, 0);
}

/* I2C STOP: SDA rises while SCL is high */
static void nsp32_prom_stop (nsp32_hw_data *data)
{
	/* stop condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 1);
	nsp32_prom_set(data, SCL, 0);
}

/* clock one bit out: set SDA, pulse SCL */
static void nsp32_prom_write_bit(nsp32_hw_data *data, int val)
{
	/* write */
	nsp32_prom_set(data, SDA, val);
	nsp32_prom_set(data, SCL, 1  );
	nsp32_prom_set(data, SCL, 0  );
}

/* clock one bit in: sample SDA while SCL is high, in input mode */
static int nsp32_prom_read_bit(nsp32_hw_data *data)
{
	int val;

	/* read */
	nsp32_prom_set(data, ENA, 0);	/* input mode */
	nsp32_prom_set(data, SCL, 1);

	val = nsp32_prom_get(data, SDA);

	nsp32_prom_set(data, SCL, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */

	return val;
}


/**************************************************************************
 * Power Management
 */
#ifdef CONFIG_PM

/*
Device suspended */ static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host); pci_save_state (pdev); pci_disable_device (pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /* Device woken up */ static int nsp32_resume(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; unsigned short reg; nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host); pci_set_power_state(pdev, PCI_D0); pci_enable_wake (pdev, PCI_D0, 0); pci_restore_state (pdev); reg = nsp32_read2(data->BaseAddress, INDEX_REG); nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg); if (reg == 0xffff) { nsp32_msg(KERN_INFO, "missing device. abort resume."); return 0; } nsp32hw_init (data); nsp32_do_bus_reset(data); nsp32_msg(KERN_INFO, "resume success"); return 0; } #endif /************************************************************************ * PCI/Cardbus probe/remove routine */ static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; nsp32_hw_data *data = &nsp32_data_base; nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); ret = pci_enable_device(pdev); if (ret) { nsp32_msg(KERN_ERR, "failed to enable pci device"); return ret; } data->Pci = pdev; data->pci_devid = id; data->IrqNumber = pdev->irq; data->BaseAddress = pci_resource_start(pdev, 0); data->NumAddress = pci_resource_len (pdev, 0); data->MmioAddress = pci_ioremap_bar(pdev, 1); data->MmioLength = pci_resource_len (pdev, 1); pci_set_master(pdev); ret = nsp32_detect(pdev); nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s", pdev->irq, data->MmioAddress, data->MmioLength, pci_name(pdev), nsp32_model[id->driver_data]); nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret); return ret; } static void 
nsp32_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);

	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");

	/* unregister from the SCSI midlayer, then free driver resources */
	scsi_remove_host(host);

	nsp32_release(host);

	scsi_host_put(host);
}

static struct pci_driver nsp32_driver = {
	.name		= "nsp32",
	.id_table	= nsp32_pci_table,
	.probe		= nsp32_probe,
	.remove		= nsp32_remove,
#ifdef CONFIG_PM
	.suspend	= nsp32_suspend,
	.resume		= nsp32_resume,
#endif
};

/*********************************************************************
 * Module entry point
 */
static int __init init_nsp32(void) {
	nsp32_msg(KERN_INFO, "loading...");
	return pci_register_driver(&nsp32_driver);
}

static void __exit exit_nsp32(void) {
	nsp32_msg(KERN_INFO, "unloading...");
	pci_unregister_driver(&nsp32_driver);
}

module_init(init_nsp32);
module_exit(exit_nsp32);

/* end */
gpl-2.0
StephenRJ/cm12_kernel_moto_shamu
drivers/scsi/nsp32.c
2528
90538
/* * NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver * Copyright (C) 2001, 2002, 2003 * YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp> * GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Revision History: * 1.0: Initial Release. * 1.1: Add /proc SDTR status. * Remove obsolete error handler nsp32_reset. * Some clean up. * 1.2: PowerPC (big endian) support. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/dma-mapping.h> #include <asm/dma.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include "nsp32.h" /*********************************************************************** * Module parameters */ static int trans_mode = 0; /* default: BIOS */ module_param (trans_mode, int, 0); MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M"); #define ASYNC_MODE 1 #define ULTRA20M_MODE 2 static bool auto_param = 0; /* default: ON */ module_param (auto_param, bool, 0); MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)"); static bool disc_priv = 1; /* default: OFF */ module_param (disc_priv, bool, 0); MODULE_PARM_DESC(disc_priv, "disconnection 
privilege mode (0: ON 1: OFF(default))"); MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>, GOTO Masanori <gotom@debian.or.jp>"); MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module"); MODULE_LICENSE("GPL"); static const char *nsp32_release_version = "1.2"; /**************************************************************************** * Supported hardware */ static struct pci_device_id nsp32_pci_table[] = { { .vendor = PCI_VENDOR_ID_IODATA, .device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_IODATA, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_KME, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_KME, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_WBT, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_WORKBIT, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_WORKBIT_STANDARD, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_WORKBIT, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_LOGITEC, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_LOGITEC, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_MELCO, }, { .vendor = PCI_VENDOR_ID_WORKBIT, .device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = MODEL_PCI_MELCO, }, {0,0,}, }; MODULE_DEVICE_TABLE(pci, nsp32_pci_table); static nsp32_hw_data nsp32_data_base; /* probe <-> detect glue */ /* * Period/AckWidth speed conversion table * * Note: This period/ackwidth speed table must be in 
descending order. */ static nsp32_sync_table nsp32_sync_table_40M[] = { /* {PNo, AW, SP, EP, SREQ smpl} Speed(MB/s) Period AckWidth */ {0x1, 0, 0x0c, 0x0c, SMPL_40M}, /* 20.0 : 50ns, 25ns */ {0x2, 0, 0x0d, 0x18, SMPL_40M}, /* 13.3 : 75ns, 25ns */ {0x3, 1, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ {0x4, 1, 0x1a, 0x1f, SMPL_20M}, /* 8.0 : 125ns, 50ns */ {0x5, 2, 0x20, 0x25, SMPL_20M}, /* 6.7 : 150ns, 75ns */ {0x6, 2, 0x26, 0x31, SMPL_20M}, /* 5.7 : 175ns, 75ns */ {0x7, 3, 0x32, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ {0x8, 3, 0x33, 0x38, SMPL_10M}, /* 4.4 : 225ns, 100ns */ {0x9, 3, 0x39, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ }; static nsp32_sync_table nsp32_sync_table_20M[] = { {0x1, 0, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */ {0x2, 0, 0x1a, 0x25, SMPL_20M}, /* 6.7 : 150ns, 50ns */ {0x3, 1, 0x26, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */ {0x4, 1, 0x33, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */ {0x5, 2, 0x3f, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 150ns */ {0x6, 2, 0x4c, 0x57, SMPL_10M}, /* 2.8 : 350ns, 150ns */ {0x7, 3, 0x58, 0x64, SMPL_10M}, /* 2.5 : 400ns, 200ns */ {0x8, 3, 0x65, 0x70, SMPL_10M}, /* 2.2 : 450ns, 200ns */ {0x9, 3, 0x71, 0x7d, SMPL_10M}, /* 2.0 : 500ns, 200ns */ }; static nsp32_sync_table nsp32_sync_table_pci[] = { {0x1, 0, 0x0c, 0x0f, SMPL_40M}, /* 16.6 : 60ns, 30ns */ {0x2, 0, 0x10, 0x16, SMPL_40M}, /* 11.1 : 90ns, 30ns */ {0x3, 1, 0x17, 0x1e, SMPL_20M}, /* 8.3 : 120ns, 60ns */ {0x4, 1, 0x1f, 0x25, SMPL_20M}, /* 6.7 : 150ns, 60ns */ {0x5, 2, 0x26, 0x2d, SMPL_20M}, /* 5.6 : 180ns, 90ns */ {0x6, 2, 0x2e, 0x34, SMPL_10M}, /* 4.8 : 210ns, 90ns */ {0x7, 3, 0x35, 0x3c, SMPL_10M}, /* 4.2 : 240ns, 120ns */ {0x8, 3, 0x3d, 0x43, SMPL_10M}, /* 3.7 : 270ns, 120ns */ {0x9, 3, 0x44, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 120ns */ }; /* * function declaration */ /* module entry point */ static int nsp32_probe (struct pci_dev *, const struct pci_device_id *); static void nsp32_remove(struct pci_dev *); static int __init init_nsp32 (void); static void __exit 
exit_nsp32 (void); /* struct struct scsi_host_template */ static int nsp32_show_info (struct seq_file *, struct Scsi_Host *); static int nsp32_detect (struct pci_dev *pdev); static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static const char *nsp32_info (struct Scsi_Host *); static int nsp32_release (struct Scsi_Host *); /* SCSI error handler */ static int nsp32_eh_abort (struct scsi_cmnd *); static int nsp32_eh_bus_reset (struct scsi_cmnd *); static int nsp32_eh_host_reset(struct scsi_cmnd *); /* generate SCSI message */ static void nsp32_build_identify(struct scsi_cmnd *); static void nsp32_build_nop (struct scsi_cmnd *); static void nsp32_build_reject (struct scsi_cmnd *); static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char); /* SCSI message handler */ static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short); static void nsp32_msgout_occur (struct scsi_cmnd *); static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short); static int nsp32_setup_sg_table (struct scsi_cmnd *); static int nsp32_selection_autopara(struct scsi_cmnd *); static int nsp32_selection_autoscsi(struct scsi_cmnd *); static void nsp32_scsi_done (struct scsi_cmnd *); static int nsp32_arbitration (struct scsi_cmnd *, unsigned int); static int nsp32_reselection (struct scsi_cmnd *, unsigned char); static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int); static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short); /* SCSI SDTR */ static void nsp32_analyze_sdtr (struct scsi_cmnd *); static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char); static void nsp32_set_async (nsp32_hw_data *, nsp32_target *); static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *); static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char); /* SCSI bus status handler */ static void nsp32_wait_req (nsp32_hw_data *, int); 
static void nsp32_wait_sack (nsp32_hw_data *, int); static void nsp32_sack_assert (nsp32_hw_data *); static void nsp32_sack_negate (nsp32_hw_data *); static void nsp32_do_bus_reset(nsp32_hw_data *); /* hardware interrupt handler */ static irqreturn_t do_nsp32_isr(int, void *); /* initialize hardware */ static int nsp32hw_init(nsp32_hw_data *); /* EEPROM handler */ static int nsp32_getprom_param (nsp32_hw_data *); static int nsp32_getprom_at24 (nsp32_hw_data *); static int nsp32_getprom_c16 (nsp32_hw_data *); static void nsp32_prom_start (nsp32_hw_data *); static void nsp32_prom_stop (nsp32_hw_data *); static int nsp32_prom_read (nsp32_hw_data *, int); static int nsp32_prom_read_bit (nsp32_hw_data *); static void nsp32_prom_write_bit(nsp32_hw_data *, int); static void nsp32_prom_set (nsp32_hw_data *, int, int); static int nsp32_prom_get (nsp32_hw_data *, int); /* debug/warning/info message */ static void nsp32_message (const char *, int, char *, char *, ...); #ifdef NSP32_DEBUG static void nsp32_dmessage(const char *, int, int, char *, ...); #endif /* * max_sectors is currently limited up to 128. */ static struct scsi_host_template nsp32_template = { .proc_name = "nsp32", .name = "Workbit NinjaSCSI-32Bi/UDE", .show_info = nsp32_show_info, .info = nsp32_info, .queuecommand = nsp32_queuecommand, .can_queue = 1, .sg_tablesize = NSP32_SG_SIZE, .max_sectors = 128, .cmd_per_lun = 1, .this_id = NSP32_HOST_SCSIID, .use_clustering = DISABLE_CLUSTERING, .eh_abort_handler = nsp32_eh_abort, .eh_bus_reset_handler = nsp32_eh_bus_reset, .eh_host_reset_handler = nsp32_eh_host_reset, /* .highmem_io = 1, */ }; #include "nsp32_io.h" /*********************************************************************** * debug, error print */ #ifndef NSP32_DEBUG # define NSP32_DEBUG_MASK 0x000000 # define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args) # define nsp32_dbg(mask, args...) /* */ #else # define NSP32_DEBUG_MASK 0xffffff # define nsp32_msg(type, args...) 
\
	nsp32_message (__func__, __LINE__, (type), args)
# define nsp32_dbg(mask, args...) \
	nsp32_dmessage(__func__, __LINE__, (mask), args)
#endif

/* per-subsystem debug mask bits, matched against NSP32_DEBUG_MASK */
#define NSP32_DEBUG_QUEUECOMMAND	BIT(0)
#define NSP32_DEBUG_REGISTER		BIT(1)
#define NSP32_DEBUG_AUTOSCSI		BIT(2)
#define NSP32_DEBUG_INTR		BIT(3)
#define NSP32_DEBUG_SGLIST		BIT(4)
#define NSP32_DEBUG_BUSFREE		BIT(5)
#define NSP32_DEBUG_CDB_CONTENTS	BIT(6)
#define NSP32_DEBUG_RESELECTION		BIT(7)
#define NSP32_DEBUG_MSGINOCCUR		BIT(8)
#define NSP32_DEBUG_EEPROM		BIT(9)
#define NSP32_DEBUG_MSGOUTOCCUR		BIT(10)
#define NSP32_DEBUG_BUSRESET		BIT(11)
#define NSP32_DEBUG_RESTART		BIT(12)
#define NSP32_DEBUG_SYNC		BIT(13)
#define NSP32_DEBUG_WAIT		BIT(14)
#define NSP32_DEBUG_TARGETFLAG		BIT(15)
#define NSP32_DEBUG_PROC		BIT(16)
#define NSP32_DEBUG_INIT		BIT(17)
#define NSP32_SPECIAL_PRINT_REGISTER	BIT(20)

#define NSP32_DEBUG_BUF_LEN		100

/*
 * printk wrapper used by nsp32_msg(): formats into a fixed buffer and
 * prefixes the driver name (plus caller/line when NSP32_DEBUG is set).
 */
static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
{
	va_list args;
	char buf[NSP32_DEBUG_BUF_LEN];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

#ifndef NSP32_DEBUG
	printk("%snsp32: %s\n", type, buf);
#else
	printk("%snsp32: %s (%d): %s\n", type, func, line, buf);
#endif
}

#ifdef NSP32_DEBUG
/* debug-only printk wrapper, filtered by the compile-time debug mask */
static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
{
	va_list args;
	char buf[NSP32_DEBUG_BUF_LEN];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (mask & NSP32_DEBUG_MASK) {
		printk("nsp32-debug: 0x%x %s (%d): %s\n",
		       mask, func, line, buf);
	}
}
#endif

#ifdef NSP32_DEBUG
# include "nsp32_debug.c"
#else
# define show_command(arg)   /* */
# define show_busphase(arg)  /* */
# define show_autophase(arg) /* */
#endif

/*
 * IDENTIFY Message
 *
 * Appends an IDENTIFY message for this command's LUN to the outgoing
 * message buffer.  DiscPriv is currently always off (mode == FALSE).
 */
static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int pos             = data->msgout_len;
	int mode            = FALSE;

	/* XXX: Auto DiscPriv detection is progressing...
	 */
	if (disc_priv == 0) {
		/* mode = TRUE; */
	}

	data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++;

	data->msgout_len = pos;
}

/*
 * SDTR Message Routine
 *
 * Appends an extended SDTR (synchronous data transfer request) message
 * carrying the given period/offset to the outgoing message buffer.
 */
static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
			     unsigned char period,
			     unsigned char offset)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int pos             = data->msgout_len;

	data->msgoutbuf[pos] = EXTENDED_MESSAGE;  pos++;
	data->msgoutbuf[pos] = EXTENDED_SDTR_LEN; pos++;
	data->msgoutbuf[pos] = EXTENDED_SDTR;     pos++;
	data->msgoutbuf[pos] = period;            pos++;
	data->msgoutbuf[pos] = offset;            pos++;

	data->msgout_len = pos;
}

/*
 * No Operation Message — only valid when the buffer is empty.
 */
static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int            pos  = data->msgout_len;

	if (pos != 0) {
		nsp32_msg(KERN_WARNING,
			  "Some messages are already contained!");
		return;
	}

	data->msgoutbuf[pos] = NOP; pos++;
	data->msgout_len = pos;
}

/*
 * Reject Message
 */
static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int            pos  = data->msgout_len;

	data->msgoutbuf[pos] = MESSAGE_REJECT; pos++;
	data->msgout_len = pos;
}

/*
 * timer
 */
#if 0
static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time)
{
	unsigned int base = SCpnt->host->io_port;

	nsp32_dbg(NSP32_DEBUG_INTR, "timer=%d", time);

	if (time & (~TIMER_CNT_MASK)) {
		nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow");
	}

	nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK);
}
#endif


/*
 * set SCSI command and other parameter to asic, and start selection phase
 */
static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data  *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int	base    = SCpnt->device->host->io_port;
	unsigned int	host_id = SCpnt->device->host->this_id;
	unsigned char	target  = scmd_id(SCpnt);
	nsp32_autoparam *param  = data->autoparam;
	unsigned char	phase;
	int		i, ret;
	unsigned int	msgout;
	u16_le		s;
	nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");

	/*
	 * check bus free
	 */
	phase = nsp32_read1(base, SCSI_BUS_MONITOR);
	if (phase != BUSMON_BUS_FREE) {
		nsp32_msg(KERN_WARNING, "bus busy");
		show_busphase(phase & BUSMON_PHASE_MASK);
		SCpnt->result = DID_BUS_BUSY << 16;
		return FALSE;
	}

	/*
	 * message out
	 *
	 * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
	 *       over 3 messages needs another routine.
	 */
	if (data->msgout_len == 0) {
		nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
		SCpnt->result = DID_ERROR << 16;
		return FALSE;
	} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
		msgout = 0;
		for (i = 0; i < data->msgout_len; i++) {
			/*
			 * the sending order of the message is:
			 *  MCNT 3: MSG#0 -> MSG#1 -> MSG#2
			 *  MCNT 2:          MSG#1 -> MSG#2
			 *  MCNT 1:                   MSG#2
			 */
			msgout >>= 8;
			msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
		}
		msgout |= MV_VALID;	/* MV valid */
		msgout |= (unsigned int)data->msgout_len; /* len */
	} else {
		/* data->msgout_len > 3 */
		msgout = 0;
	}

	// nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
	// nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * setup asic parameter
	 */
	memset(param, 0, sizeof(nsp32_autoparam));

	/* cdb — note the hardware expects each CDB byte at a 4-byte stride */
	for (i = 0; i < SCpnt->cmd_len; i++) {
		param->cdb[4 * i] = SCpnt->cmnd[i];
	}

	/* outgoing messages */
	param->msgout = cpu_to_le32(msgout);

	/* syncreg, ackwidth, target id, SREQ sampling rate */
	param->syncreg    = data->cur_target->syncreg;
	param->ackwidth   = data->cur_target->ackwidth;
	param->target_id  = BIT(host_id) | BIT(target);
	param->sample_reg = data->cur_target->sample_reg;

	// nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg);

	/* command control */
	param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
					     AUTOSCSI_START         |
					     AUTO_MSGIN_00_OR_04    |
					     AUTO_MSGIN_02          |
					     AUTO_ATN               );

	/* transfer control */
	s = 0;
	switch (data->trans_method) {
	case NSP32_TRANSFER_BUSMASTER:
		s |= BM_START;
		break;
	case NSP32_TRANSFER_MMIO:
		s |= CB_MMIO_MODE;
		break;
	case NSP32_TRANSFER_PIO:
		s |= CB_IO_MODE;
		break;
	default:
		nsp32_msg(KERN_ERR, "unknown trans_method");
		break;
	}
	/*
	 * OR-ed BLIEND_MODE, FIFO intr is decreased, instead of PCI bus waits.
	 * For bus master transfer, it's taken off.
	 */
	s |= (TRANSFER_GO | ALL_COUNTER_CLR);
	param->transfer_control = cpu_to_le16(s);

	/* sg table addr */
	param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr);

	/*
	 * transfer parameter to ASIC
	 */
	nsp32_write4(base, SGT_ADR,         data->auto_paddr);
	nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
					    AUTO_PARAMETER         );

	/*
	 * Check arbitration
	 */
	ret = nsp32_arbitration(SCpnt, base);

	return ret;
}

/*
 * Selection with AUTO SCSI (without AUTO PARAMETER)
 */
static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data  *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int	base    = SCpnt->device->host->io_port;
	unsigned int	host_id = SCpnt->device->host->this_id;
	unsigned char	target  = scmd_id(SCpnt);
	unsigned char	phase;
	int		status;
	unsigned short	command	= 0;
	unsigned int	msgout  = 0;
	unsigned short	execph;
	int		i;

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");

	/*
	 * IRQ disable
	 */
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);

	/*
	 * check bus line
	 *
	 * NOTE(review): '(phase & BUSMON_BSY) == 1' compares the masked
	 * value to literal 1; if BUSMON_BSY is not bit 0 this test can
	 * never fire.  Likely intended '!= 0' — verify against nsp32.h
	 * before changing behavior.
	 */
	phase = nsp32_read1(base, SCSI_BUS_MONITOR);
	if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) {
		nsp32_msg(KERN_WARNING, "bus busy");
		SCpnt->result = DID_BUS_BUSY << 16;
		status = 1;
		goto out;
	}

	/*
	 * clear execph
	 */
	execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);

	/*
	 * clear FIFO counter to set CDBs
	 */
	nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER);

	/*
	 * set CDB0 - CDB15
	 */
	for (i = 0; i < SCpnt->cmd_len; i++) {
		nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
	}
	nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);

	/*
	 * set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
	 */
	nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));

	/*
	 * set SCSI MSGOUT REG
	 *
	 * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
	 *       over 3 messages needs another routine.
	 */
	if (data->msgout_len == 0) {
		nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
		SCpnt->result = DID_ERROR << 16;
		status = 1;
		goto out;
	} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
		msgout = 0;
		for (i = 0; i < data->msgout_len; i++) {
			/*
			 * the sending order of the message is:
			 *  MCNT 3: MSG#0 -> MSG#1 -> MSG#2
			 *  MCNT 2:          MSG#1 -> MSG#2
			 *  MCNT 1:                   MSG#2
			 */
			msgout >>= 8;
			msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
		}
		msgout |= MV_VALID;	/* MV valid */
		msgout |= (unsigned int)data->msgout_len; /* len */
		nsp32_write4(base, SCSI_MSG_OUT, msgout);
	} else {
		/* data->msgout_len > 3 */
		nsp32_write4(base, SCSI_MSG_OUT, 0);
	}

	/*
	 * set selection timeout(= 250ms)
	 */
	nsp32_write2(base, SEL_TIME_OUT,   SEL_TIMEOUT_TIME);

	/*
	 * set SREQ hazard killer sampling rate
	 *
	 * TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
	 *      check other internal clock!
	 */
	nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);

	/*
	 * clear Arbit
	 */
	nsp32_write1(base, SET_ARBIT,      ARBIT_CLEAR);

	/*
	 * set SYNCREG
	 * Don't set BM_START_ADR before setting this register.
	 */
	nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);

	/*
	 * set ACKWIDTH
	 */
	nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
		  "syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
		  nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
		  nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
	nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
		  data->msgout_len, msgout);

	/*
	 * set SGT ADDR (physical address)
	 */
	nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);

	/*
	 * set TRANSFER CONTROL REG
	 */
	command = 0;
	command |= (TRANSFER_GO | ALL_COUNTER_CLR);
	if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		if (scsi_bufflen(SCpnt) > 0) {
			command |= BM_START;
		}
	} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
		command |= CB_MMIO_MODE;
	} else if (data->trans_method & NSP32_TRANSFER_PIO) {
		command |= CB_IO_MODE;
	}
	nsp32_write2(base, TRANSFER_CONTROL, command);

	/*
	 * start AUTO SCSI, kick off arbitration
	 */
	command = (CLEAR_CDB_FIFO_POINTER |
		   AUTOSCSI_START         |
		   AUTO_MSGIN_00_OR_04    |
		   AUTO_MSGIN_02          |
		   AUTO_ATN               );
	nsp32_write2(base, COMMAND_CONTROL, command);

	/*
	 * Check arbitration
	 */
	status = nsp32_arbitration(SCpnt, base);

 out:
	/*
	 * IRQ enable
	 */
	nsp32_write2(base, IRQ_CONTROL, 0);

	return status;
}

/*
 * Arbitration Status Check
 *
 * Note: Arbitration counter is waited during ARBIT_GO is not lifting.
 *	 Using udelay(1) consumes CPU time and system time, but
 *	 arbitration delay time is defined minimal 2.4us in SCSI
 *	 specification, thus udelay works as coarse grained wait timer.
 */
static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
{
	unsigned char arbit;
	int	      status = TRUE;
	int	      time   = 0;

	/* busy-poll ARBIT_STATUS until win/fail or the bounded count runs out */
	do {
		arbit = nsp32_read1(base, ARBIT_STATUS);
		time++;
	} while ((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 &&
		 (time <= ARBIT_TIMEOUT_TIME));

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
		  "arbit: 0x%x, delay time: %d", arbit, time);

	if (arbit & ARBIT_WIN) {
		/* Arbitration succeeded */
		SCpnt->result = DID_OK << 16;
		nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */
	} else if (arbit & ARBIT_FAIL) {
		/* Arbitration failed */
		SCpnt->result = DID_BUS_BUSY << 16;
		status = FALSE;
	} else {
		/*
		 * unknown error or ARBIT_GO timeout,
		 * something lock up! guess no connection.
		 */
		nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
		SCpnt->result = DID_NO_CONNECT << 16;
		status = FALSE;
	}

	/*
	 * clear Arbit
	 */
	nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);

	return status;
}

/*
 * reselection
 *
 * Note: This reselection routine is called from msgin_occur,
 *	 reselection target id&lun must be already set.
 *	 SCSI-2 says IDENTIFY implies RESTORE_POINTER operation.
 */
static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   host_id = SCpnt->device->host->this_id;
	unsigned int   base    = SCpnt->device->host->io_port;
	unsigned char  tmpid, newid;

	nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter");

	/*
	 * calculate reselected SCSI ID: mask off our own ID bit, then
	 * find the lowest set bit in the reselect-ID register.
	 */
	tmpid = nsp32_read1(base, RESELECT_ID);
	tmpid &= (~BIT(host_id));
	newid = 0;
	while (tmpid) {
		if (tmpid & 1) {
			break;
		}
		tmpid >>= 1;
		newid++;
	}

	/*
	 * If reselected New ID:LUN is not existed
	 * or current nexus is not existed, unexpected
	 * reselection is occurred. Send reject message.
	 */
	if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
		nsp32_msg(KERN_WARNING, "unknown id/lun");
		return FALSE;
	} else if(data->lunt[newid][newlun].SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "no SCSI command is processing");
		return FALSE;
	}

	/* switch the driver's current nexus to the reselecting target */
	data->cur_id    = newid;
	data->cur_lun   = newlun;
	data->cur_target = &(data->target[newid]);
	data->cur_lunt  = &(data->lunt[newid][newlun]);

	/* reset SACK/SavedACK counter (or ALL clear?) */
	nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

	return TRUE;
}


/*
 * nsp32_setup_sg_table - build scatter gather list for transfer data
 *			  with bus master.
 *
 * Note: NinjaSCSI-32Bi/UDE bus master can not transfer over 64KB at a time.
 */
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	struct scatterlist *sg;
	nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
	int num, i;
	u32_le l;

	if (sgt == NULL) {
		nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
		return FALSE;
	}

	num = scsi_dma_map(SCpnt);
	if (!num)
		/* no data to transfer */
		return TRUE;
	else if (num < 0)
		return FALSE;
	else {
		scsi_for_each_sg(SCpnt, sg, num, i) {
			/*
			 * Build nsp32_sglist, substitute sg dma addresses.
			 */
			sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
			sgt[i].len  = cpu_to_le32(sg_dma_len(sg));

			if (le32_to_cpu(sgt[i].len) > 0x10000) {
				nsp32_msg(KERN_ERR,
					"can't transfer over 64KB at a time, size=0x%lx", le32_to_cpu(sgt[i].len));
				return FALSE;
			}
			nsp32_dbg(NSP32_DEBUG_SGLIST,
				  "num 0x%x : addr 0x%lx len 0x%lx",
				  i,
				  le32_to_cpu(sgt[i].addr),
				  le32_to_cpu(sgt[i].len ));
		}

		/* set end mark on the last segment */
		l = le32_to_cpu(sgt[num-1].len);
		sgt[num-1].len = cpu_to_le32(l | SGTEND);
	}

	return TRUE;
}

/*
 * midlayer queuecommand entry (lock-held variant): validate the
 * command, set up the per-LUN state and SG table, then start selection.
 */
static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target *target;
	nsp32_lunt   *cur_lunt;
	int ret;

	nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "enter.
target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x " "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len, scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt)); if (data->CurrentSC != NULL) { nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request"); data->CurrentSC = NULL; SCpnt->result = DID_NO_CONNECT << 16; done(SCpnt); return 0; } /* check target ID is not same as this initiator ID */ if (scmd_id(SCpnt) == SCpnt->device->host->this_id) { nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "terget==host???"); SCpnt->result = DID_BAD_TARGET << 16; done(SCpnt); return 0; } /* check target LUN is allowable value */ if (SCpnt->device->lun >= MAX_LUN) { nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun"); SCpnt->result = DID_BAD_TARGET << 16; done(SCpnt); return 0; } show_command(SCpnt); SCpnt->scsi_done = done; data->CurrentSC = SCpnt; SCpnt->SCp.Status = CHECK_CONDITION; SCpnt->SCp.Message = 0; scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt); SCpnt->SCp.this_residual = scsi_bufflen(SCpnt); SCpnt->SCp.buffer = NULL; SCpnt->SCp.buffers_residual = 0; /* initialize data */ data->msgout_len = 0; data->msgin_len = 0; cur_lunt = &(data->lunt[SCpnt->device->id][SCpnt->device->lun]); cur_lunt->SCpnt = SCpnt; cur_lunt->save_datp = 0; cur_lunt->msgin03 = FALSE; data->cur_lunt = cur_lunt; data->cur_id = SCpnt->device->id; data->cur_lun = SCpnt->device->lun; ret = nsp32_setup_sg_table(SCpnt); if (ret == FALSE) { nsp32_msg(KERN_ERR, "SGT fail"); SCpnt->result = DID_ERROR << 16; nsp32_scsi_done(SCpnt); return 0; } /* Build IDENTIFY */ nsp32_build_identify(SCpnt); /* * If target is the first time to transfer after the reset * (target don't have SDTR_DONE and SDTR_INITIATOR), sync * message SDTR is needed to do synchronous transfer. 
*/ target = &data->target[scmd_id(SCpnt)]; data->cur_target = target; if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) { unsigned char period, offset; if (trans_mode != ASYNC_MODE) { nsp32_set_max_sync(data, target, &period, &offset); nsp32_build_sdtr(SCpnt, period, offset); target->sync_flag |= SDTR_INITIATOR; } else { nsp32_set_async(data, target); target->sync_flag |= SDTR_DONE; } nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "SDTR: entry: %d start_period: 0x%x offset: 0x%x\n", target->limit_entry, period, offset); } else if (target->sync_flag & SDTR_INITIATOR) { /* * It was negotiating SDTR with target, sending from the * initiator, but there are no chance to remove this flag. * Set async because we don't get proper negotiation. */ nsp32_set_async(data, target); target->sync_flag &= ~SDTR_INITIATOR; target->sync_flag |= SDTR_DONE; nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "SDTR_INITIATOR: fall back to async"); } else if (target->sync_flag & SDTR_TARGET) { /* * It was negotiating SDTR with target, sending from target, * but there are no chance to remove this flag. Set async * because we don't get proper negotiation. 
*/ nsp32_set_async(data, target); target->sync_flag &= ~SDTR_TARGET; target->sync_flag |= SDTR_DONE; nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "Unknown SDTR from target is reached, fall back to async."); } nsp32_dbg(NSP32_DEBUG_TARGETFLAG, "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x", SCpnt->device->id, target->sync_flag, target->syncreg, target->ackwidth); /* Selection */ if (auto_param == 0) { ret = nsp32_selection_autopara(SCpnt); } else { ret = nsp32_selection_autoscsi(SCpnt); } if (ret != TRUE) { nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail"); nsp32_scsi_done(SCpnt); } return 0; } static DEF_SCSI_QCMD(nsp32_queuecommand) /* initialize asic */ static int nsp32hw_init(nsp32_hw_data *data) { unsigned int base = data->BaseAddress; unsigned short irq_stat; unsigned long lc_reg; unsigned char power; lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE); if ((lc_reg & 0xff00) == 0) { lc_reg |= (0x20 << 8); nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff); } nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); nsp32_write2(base, TRANSFER_CONTROL, 0); nsp32_write4(base, BM_CNT, 0); nsp32_write2(base, SCSI_EXECUTE_PHASE, 0); do { irq_stat = nsp32_read2(base, IRQ_STATUS); nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat); } while (irq_stat & IRQSTATUS_ANY_IRQ); /* * Fill FIFO_FULL_SHLD, FIFO_EMPTY_SHLD. Below parameter is * designated by specification. 
*/ if ((data->trans_method & NSP32_TRANSFER_PIO) || (data->trans_method & NSP32_TRANSFER_MMIO)) { nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x40); nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40); } else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x10); nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60); } else { nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode"); } nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x", nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT), nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT)); nsp32_index_write1(base, CLOCK_DIV, data->clock); nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD); nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disable */ /* * initialize MISC_WRRD register * * Note: Designated parameters is obeyed as following: * MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set. * MISC_MASTER_TERMINATION_SELECT: It must be set. * MISC_BMREQ_NEGATE_TIMING_SEL: It should be set. * MISC_AUTOSEL_TIMING_SEL: It should be set. * MISC_BMSTOP_CHANGE2_NONDATA_PHASE: It should be set. * MISC_DELAYED_BMSTART: It's selected for safety. * * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then * we have to set TRANSFERCONTROL_BM_START as 0 and set * appropriate value before restarting bus master transfer. 
*/ nsp32_index_write2(base, MISC_WR, (SCSI_DIRECTION_DETECTOR_SELECT | DELAYED_BMSTART | MASTER_TERMINATION_SELECT | BMREQ_NEGATE_TIMING_SEL | AUTOSEL_TIMING_SEL | BMSTOP_CHANGE2_NONDATA_PHASE)); nsp32_index_write1(base, TERM_PWR_CONTROL, 0); power = nsp32_index_read1(base, TERM_PWR_CONTROL); if (!(power & SENSE)) { nsp32_msg(KERN_INFO, "term power on"); nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR); } nsp32_write2(base, TIMER_SET, TIMER_STOP); nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */ nsp32_write1(base, SYNC_REG, 0); nsp32_write1(base, ACK_WIDTH, 0); nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME); /* * enable to select designated IRQ (except for * IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR) */ nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ | IRQSELECT_SCSIRESET_IRQ | IRQSELECT_FIFO_SHLD_IRQ | IRQSELECT_RESELECT_IRQ | IRQSELECT_PHASE_CHANGE_IRQ | IRQSELECT_AUTO_SCSI_SEQ_IRQ | // IRQSELECT_BMCNTERR_IRQ | IRQSELECT_TARGET_ABORT_IRQ | IRQSELECT_MASTER_ABORT_IRQ ); nsp32_write2(base, IRQ_CONTROL, 0); /* PCI LED off */ nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF); nsp32_index_write1(base, EXT_PORT, LED_OFF); return TRUE; } /* interrupt routine */ static irqreturn_t do_nsp32_isr(int irq, void *dev_id) { nsp32_hw_data *data = dev_id; unsigned int base = data->BaseAddress; struct scsi_cmnd *SCpnt = data->CurrentSC; unsigned short auto_stat, irq_stat, trans_stat; unsigned char busmon, busphase; unsigned long flags; int ret; int handled = 0; struct Scsi_Host *host = data->Host; spin_lock_irqsave(host->host_lock, flags); /* * IRQ check, then enable IRQ mask */ irq_stat = nsp32_read2(base, IRQ_STATUS); nsp32_dbg(NSP32_DEBUG_INTR, "enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat); /* is this interrupt comes from Ninja asic? 
*/ if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) { nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat); goto out2; } handled = 1; nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); busmon = nsp32_read1(base, SCSI_BUS_MONITOR); busphase = busmon & BUSMON_PHASE_MASK; trans_stat = nsp32_read2(base, TRANSFER_STATUS); if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) { nsp32_msg(KERN_INFO, "card disconnect"); if (data->CurrentSC != NULL) { nsp32_msg(KERN_INFO, "clean up current SCSI command"); SCpnt->result = DID_BAD_TARGET << 16; nsp32_scsi_done(SCpnt); } goto out; } /* Timer IRQ */ if (irq_stat & IRQSTATUS_TIMER_IRQ) { nsp32_dbg(NSP32_DEBUG_INTR, "timer stop"); nsp32_write2(base, TIMER_SET, TIMER_STOP); goto out; } /* SCSI reset */ if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) { nsp32_msg(KERN_INFO, "detected someone do bus reset"); nsp32_do_bus_reset(data); if (SCpnt != NULL) { SCpnt->result = DID_RESET << 16; nsp32_scsi_done(SCpnt); } goto out; } if (SCpnt == NULL) { nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened"); nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat); goto out; } /* * AutoSCSI Interrupt. * Note: This interrupt is occurred when AutoSCSI is finished. Then * check SCSIEXECUTEPHASE, and do appropriate action. Each phases are * recorded when AutoSCSI sequencer has been processed. */ if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) { /* getting SCSI executed phase */ auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE); nsp32_write2(base, SCSI_EXECUTE_PHASE, 0); /* Selection Timeout, go busfree phase. */ if (auto_stat & SELECTION_TIMEOUT) { nsp32_dbg(NSP32_DEBUG_INTR, "selection timeout occurred"); SCpnt->result = DID_TIME_OUT << 16; nsp32_scsi_done(SCpnt); goto out; } if (auto_stat & MSGOUT_PHASE) { /* * MsgOut phase was processed. * If MSG_IN_OCCUER is not set, then MsgOut phase is * completed. Thus, msgout_len must reset. Otherwise, * nothing to do here. 
If MSG_OUT_OCCUER is occurred, * then we will encounter the condition and check. */ if (!(auto_stat & MSG_IN_OCCUER) && (data->msgout_len <= 3)) { /* * !MSG_IN_OCCUER && msgout_len <=3 * ---> AutoSCSI with MSGOUTreg is processed. */ data->msgout_len = 0; }; nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed"); } if ((auto_stat & DATA_IN_PHASE) && (scsi_get_resid(SCpnt) > 0) && ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) { printk( "auto+fifo\n"); //nsp32_pio_read(SCpnt); } if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) { /* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */ nsp32_dbg(NSP32_DEBUG_INTR, "Data in/out phase processed"); /* read BMCNT, SGT pointer addr */ nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx", nsp32_read4(base, BM_CNT)); nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx", nsp32_read4(base, SGT_ADR)); nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx", nsp32_read4(base, SACK_CNT)); nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx", nsp32_read4(base, SAVED_SACK_CNT)); scsi_set_resid(SCpnt, 0); /* all data transferred! */ } /* * MsgIn Occur */ if (auto_stat & MSG_IN_OCCUER) { nsp32_msgin_occur(SCpnt, irq_stat, auto_stat); } /* * MsgOut Occur */ if (auto_stat & MSG_OUT_OCCUER) { nsp32_msgout_occur(SCpnt); } /* * Bus Free Occur */ if (auto_stat & BUS_FREE_OCCUER) { ret = nsp32_busfree_occur(SCpnt, auto_stat); if (ret == TRUE) { goto out; } } if (auto_stat & STATUS_PHASE) { /* * Read CSB and substitute CSB for SCpnt->result * to save status phase stutas byte. * scsi error handler checks host_byte (DID_*: * low level driver to indicate status), then checks * status_byte (SCSI status byte). */ SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN); } if (auto_stat & ILLEGAL_PHASE) { /* Illegal phase is detected. SACK is not back. */ nsp32_msg(KERN_WARNING, "AUTO SCSI ILLEGAL PHASE OCCUR!!!!"); /* TODO: currently we don't have any action... bus reset? */ /* * To send back SACK, assert, wait, and negate. 
*/ nsp32_sack_assert(data); nsp32_wait_req(data, NEGATE); nsp32_sack_negate(data); } if (auto_stat & COMMAND_PHASE) { /* nothing to do */ nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed"); } if (auto_stat & AUTOSCSI_BUSY) { /* AutoSCSI is running */ } show_autophase(auto_stat); } /* FIFO_SHLD_IRQ */ if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) { nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ"); switch(busphase) { case BUSPHASE_DATA_OUT: nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write"); //nsp32_pio_write(SCpnt); break; case BUSPHASE_DATA_IN: nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read"); //nsp32_pio_read(SCpnt); break; case BUSPHASE_STATUS: nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status"); SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN); break; default: nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase"); nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat); show_busphase(busphase); break; } goto out; } /* Phase Change IRQ */ if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) { nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ"); switch(busphase) { case BUSPHASE_MESSAGE_IN: nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in"); nsp32_msgin_occur(SCpnt, irq_stat, 0); break; default: nsp32_msg(KERN_WARNING, "phase chg/other phase?"); nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n", irq_stat, trans_stat); show_busphase(busphase); break; } goto out; } /* PCI_IRQ */ if (irq_stat & IRQSTATUS_PCI_IRQ) { nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred"); /* Do nothing */ } /* BMCNTERR_IRQ */ if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) { nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! "); /* * TODO: To be implemented improving bus master * transfer reliability when BMCNTERR is occurred in * AutoSCSI phase described in specification. 
*/ } #if 0 nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat); show_busphase(busphase); #endif out: /* disable IRQ mask */ nsp32_write2(base, IRQ_CONTROL, 0); out2: spin_unlock_irqrestore(host->host_lock, flags); nsp32_dbg(NSP32_DEBUG_INTR, "exit"); return IRQ_RETVAL(handled); } #undef SPRINTF #define SPRINTF(args...) seq_printf(m, ##args) static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host) { unsigned long flags; nsp32_hw_data *data; int hostno; unsigned int base; unsigned char mode_reg; int id, speed; long model; hostno = host->host_no; data = (nsp32_hw_data *)host->hostdata; base = host->io_port; SPRINTF("NinjaSCSI-32 status\n\n"); SPRINTF("Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version); SPRINTF("SCSI host No.: %d\n", hostno); SPRINTF("IRQ: %d\n", host->irq); SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1); SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1); SPRINTF("sg_tablesize: %d\n", host->sg_tablesize); SPRINTF("Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff); mode_reg = nsp32_index_read1(base, CHIP_MODE); model = data->pci_devid->driver_data; #ifdef CONFIG_PM SPRINTF("Power Management: %s\n", (mode_reg & OPTF) ? 
"yes" : "no"); #endif SPRINTF("OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]); spin_lock_irqsave(&(data->Lock), flags); SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC); spin_unlock_irqrestore(&(data->Lock), flags); SPRINTF("SDTR status\n"); for (id = 0; id < ARRAY_SIZE(data->target); id++) { SPRINTF("id %d: ", id); if (id == host->this_id) { SPRINTF("----- NinjaSCSI-32 host adapter\n"); continue; } if (data->target[id].sync_flag == SDTR_DONE) { if (data->target[id].period == 0 && data->target[id].offset == ASYNC_OFFSET ) { SPRINTF("async"); } else { SPRINTF(" sync"); } } else { SPRINTF(" none"); } if (data->target[id].period != 0) { speed = 1000000 / (data->target[id].period * 4); SPRINTF(" transfer %d.%dMB/s, offset %d", speed / 1000, speed % 1000, data->target[id].offset ); } SPRINTF("\n"); } return 0; } #undef SPRINTF /* * Reset parameters and call scsi_done for data->cur_lunt. * Be careful setting SCpnt->result = DID_* before calling this function. */ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; scsi_dma_unmap(SCpnt); /* * clear TRANSFERCONTROL_BM_START */ nsp32_write2(base, TRANSFER_CONTROL, 0); nsp32_write4(base, BM_CNT, 0); /* * call scsi_done */ (*SCpnt->scsi_done)(SCpnt); /* * reset parameters */ data->cur_lunt->SCpnt = NULL; data->cur_lunt = NULL; data->cur_target = NULL; data->CurrentSC = NULL; } /* * Bus Free Occur * * Current Phase is BUSFREE. AutoSCSI is automatically execute BUSFREE phase * with ACK reply when below condition is matched: * MsgIn 00: Command Complete. * MsgIn 02: Save Data Pointer. * MsgIn 04: Diconnect. * In other case, unexpected BUSFREE is detected. 
 */
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base   = SCpnt->device->host->io_port;

	nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
	show_autophase(execph);

	nsp32_write4(base, BM_CNT,           0);
	nsp32_write2(base, TRANSFER_CONTROL, 0);

	/*
	 * MsgIn 02: Save Data Pointer
	 *
	 * VALID:
	 *   Save Data Pointer is received. Adjust pointer.
	 *
	 * NO-VALID:
	 *   SCSI-3 says if Save Data Pointer is not received, then we restart
	 *   processing and we can't adjust any SCSI data pointer in next data
	 *   phase.
	 */
	if (execph & MSGIN_02_VALID) {
		nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid");

		/*
		 * Check sack_cnt/saved_sack_cnt, then adjust sg table if
		 * needed.
		 */
		if (!(execph & MSGIN_00_VALID) &&
		    ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
			unsigned int sacklen, s_sacklen;

			/*
			 * Read SACK count and SAVEDSACK count, then compare.
			 */
			sacklen   = nsp32_read4(base, SACK_CNT      );
			s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);

			/*
			 * If SAVEDSACKCNT == 0, it means SavedDataPointer is
			 * come after data transferring.
			 */
			if (s_sacklen > 0) {
				/*
				 * Comparing between sack and savedsack to
				 * check the condition of AutoMsgIn03.
				 *
				 * If they are same, set msgin03 == TRUE,
				 * COMMANDCONTROL_AUTO_MSGIN_03 is enabled at
				 * reselection. On the other hand, if they
				 * aren't same, set msgin03 == FALSE, and
				 * COMMANDCONTROL_AUTO_MSGIN_03 is disabled at
				 * reselection.
				 */
				if (sacklen != s_sacklen) {
					data->cur_lunt->msgin03 = FALSE;
				} else {
					data->cur_lunt->msgin03 = TRUE;
				}

				nsp32_adjust_busfree(SCpnt, s_sacklen);
			}
		}

		/* This value has not substitude with valid value yet... */
		//data->cur_lunt->save_datp = data->cur_datp;
	} else {
		/*
		 * no processing.
		 */
	}

	if (execph & MSGIN_03_VALID) {
		/* MsgIn03 was valid to be processed. No need processing. */
	}

	/*
	 * target SDTR check
	 */
	if (data->cur_target->sync_flag & SDTR_INITIATOR) {
		/*
		 * SDTR negotiation pulled by the initiator has not
		 * finished yet. Fall back to ASYNC mode.
		 */
		nsp32_set_async(data, data->cur_target);
		data->cur_target->sync_flag &= ~SDTR_INITIATOR;
		data->cur_target->sync_flag |= SDTR_DONE;
	} else if (data->cur_target->sync_flag & SDTR_TARGET) {
		/*
		 * SDTR negotiation pulled by the target has been
		 * negotiating.
		 */
		if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
			/*
			 * If valid message is received, then
			 * negotiation is succeeded.
			 */
		} else {
			/*
			 * On the contrary, if unexpected bus free is
			 * occurred, then negotiation is failed. Fall
			 * back to ASYNC mode.
			 */
			nsp32_set_async(data, data->cur_target);
		}
		data->cur_target->sync_flag &= ~SDTR_TARGET;
		data->cur_target->sync_flag |= SDTR_DONE;
	}

	/*
	 * It is always ensured by SCSI standard that initiator
	 * switches into Bus Free Phase after
	 * receiving message 00 (Command Complete), 04 (Disconnect).
	 * It's the reason that processing here is valid.
	 */
	if (execph & MSGIN_00_VALID) {
		/* MsgIn 00: Command Complete */
		nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");

		SCpnt->SCp.Status  = nsp32_read1(base, SCSI_CSB_IN);
		SCpnt->SCp.Message = 0;
		nsp32_dbg(NSP32_DEBUG_BUSFREE,
			  "normal end stat=0x%x resid=0x%x\n",
			  SCpnt->SCp.Status, scsi_get_resid(SCpnt));
		SCpnt->result = (DID_OK             << 16) |
				(SCpnt->SCp.Message <<  8) |
				(SCpnt->SCp.Status  <<  0);
		nsp32_scsi_done(SCpnt);
		/* All operation is done */
		return TRUE;
	} else if (execph & MSGIN_04_VALID) {
		/* MsgIn 04: Disconnect */
		SCpnt->SCp.Status  = nsp32_read1(base, SCSI_CSB_IN);
		SCpnt->SCp.Message = 4;

		nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
		return TRUE;
	} else {
		/* Unexpected bus free */
		nsp32_msg(KERN_WARNING, "unexpected bus free occurred");

		/* DID_ERROR? */
		//SCpnt->result   = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
		SCpnt->result = DID_ERROR << 16;
		nsp32_scsi_done(SCpnt);
		return TRUE;
	}
	/* NOTE(review): unreachable — every branch above returns. */
	return FALSE;
}

/*
 * nsp32_adjust_busfree - adjusting SG table
 *
 * Note: This driver adjust the SG table using SCSI ACK
 *       counter instead of BMCNT counter!
 *
 * Walks the SG table from the current entry, accounting for s_sacklen
 * bytes already acknowledged, and rewrites the partially-sent entry's
 * address/length so a later restart resumes at the right place.
 */
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int                   old_entry = data->cur_entry;
	int                   new_entry;
	int                   sg_num = data->cur_lunt->sg_num;
	nsp32_sgtable *sgt  = data->cur_lunt->sglun->sgt;
	unsigned int          restlen, sentlen;
	u32_le                len, addr;

	nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));

	/* adjust saved SACK count with 4 byte start address boundary */
	s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;

	/*
	 * calculate new_entry from sack count and each sgt[].len
	 * calculate the byte which is intent to send
	 */
	sentlen = 0;
	for (new_entry = old_entry; new_entry < sg_num; new_entry++) {
		sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND);
		if (sentlen > s_sacklen) {
			break;
		}
	}

	/* all sgt is processed */
	if (new_entry == sg_num) {
		goto last;
	}

	if (sentlen == s_sacklen) {
		/* XXX: confirm it's ok or not */
		/* In this case, it's ok because we are at
		   the head element of the sg. restlen is correctly calculated. */
	}

	/* calculate the rest length for transferring */
	restlen = sentlen - s_sacklen;

	/* update adjusting current SG table entry */
	len  = le32_to_cpu(sgt[new_entry].len);
	addr = le32_to_cpu(sgt[new_entry].addr);
	addr += (len - restlen);
	sgt[new_entry].addr = cpu_to_le32(addr);
	sgt[new_entry].len  = cpu_to_le32(restlen);

	/* set cur_entry with new_entry */
	data->cur_entry = new_entry;

	return;

 last:
	if (scsi_get_resid(SCpnt) < sentlen) {
		nsp32_msg(KERN_ERR, "resid underflow");
	}

	scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
	nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));

	/* update hostdata and lun */

	return;
}

/*
 * It's called MsgOut phase occur.
 * NinjaSCSI-32Bi/UDE automatically processes up to 3 messages in
 * message out phase. It, however, has more than 3 messages,
 * HBA creates the interrupt and we have to process by hand.
 */
static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base   = SCpnt->device->host->io_port;
	//unsigned short command;
	long new_sgtp;
	int i;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
		  "enter: msgout_len: 0x%x", data->msgout_len);

	/*
	 * If MsgOut phase is occurred without having any
	 * message, then No_Operation is sent (SCSI-2).
	 */
	if (data->msgout_len == 0) {
		nsp32_build_nop(SCpnt);
	}

	/*
	 * Set SGTP ADDR current entry for restarting AUTOSCSI,
	 * because SGTP is incremented next point.
	 * There is few statement in the specification...
	 *
	 * NOTE(review): new_sgtp is computed here but never written to the
	 * hardware or read again in this function.
	 */
	new_sgtp = data->cur_lunt->sglun_paddr +
		   (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));

	/*
	 * send messages
	 */
	for (i = 0; i < data->msgout_len; i++) {
		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
			  "%d : 0x%x", i, data->msgoutbuf[i]);

		/*
		 * Check REQ is asserted.
		 */
		nsp32_wait_req(data, ASSERT);

		if (i == (data->msgout_len - 1)) {
			/*
			 * If the last message, set the AutoSCSI restart
			 * before send back the ack message. AutoSCSI
			 * restart automatically negate ATN signal.
			 */
			//command = (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);
			//nsp32_restart_autoscsi(SCpnt, command);
			nsp32_write2(base, COMMAND_CONTROL,
					 (CLEAR_CDB_FIFO_POINTER |
					  AUTO_COMMAND_PHASE     |
					  AUTOSCSI_RESTART       |
					  AUTO_MSGIN_00_OR_04    |
					  AUTO_MSGIN_02          ));
		}
		/*
		 * Write data with SACK, then wait sack is
		 * automatically negated.
		 */
		nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]);
		nsp32_wait_sack(data, NEGATE);

		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
			  nsp32_read1(base, SCSI_BUS_MONITOR));
	};

	data->msgout_len = 0;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit");
}

/*
 * Restart AutoSCSI
 *
 * Note: Restarting AutoSCSI needs set:
 *		SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL
 *
 * Reprograms sync/ack/SG registers for the current nexus and writes
 * COMMAND_CONTROL with the caller-supplied command bits ORed with the
 * restart bits. Register write order follows the comments below.
 */
static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = data->BaseAddress;
	unsigned short transfer = 0;

	nsp32_dbg(NSP32_DEBUG_RESTART, "enter");

	if (data->cur_target == NULL || data->cur_lunt == NULL) {
		nsp32_msg(KERN_ERR, "Target or Lun is invalid");
	}

	/*
	 * set SYNC_REG
	 * Don't set BM_START_ADR before setting this register.
	 */
	nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);

	/*
	 * set ACKWIDTH
	 */
	nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);

	/*
	 * set SREQ hazard killer sampling rate
	 */
	nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);

	/*
	 * set SGT ADDR (physical address)
	 */
	nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);

	/*
	 * set TRANSFER CONTROL REG
	 */
	transfer = 0;
	transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
	if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		if (scsi_bufflen(SCpnt) > 0) {
			transfer |= BM_START;
		}
	} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
		transfer |= CB_MMIO_MODE;
	} else if (data->trans_method & NSP32_TRANSFER_PIO) {
		transfer |= CB_IO_MODE;
	}
	nsp32_write2(base, TRANSFER_CONTROL, transfer);

	/*
	 * restart AutoSCSI
	 *
	 * TODO: COMMANDCONTROL_AUTO_COMMAND_PHASE is needed ?
	 */
	command |= (CLEAR_CDB_FIFO_POINTER |
		    AUTO_COMMAND_PHASE    |
		    AUTOSCSI_RESTART      );
	nsp32_write2(base, COMMAND_CONTROL, command);

	nsp32_dbg(NSP32_DEBUG_RESTART, "exit");
}

/*
 * cannot run automatically message in occur
 *
 * Reads one message byte per invocation into data->msginbuf, handles
 * IDENTIFY (reselection) and 1-/2-byte/extended messages by hand, then
 * restarts AutoSCSI and completes the REQ/ACK handshake. Unknown or
 * unsupported messages are answered with MESSAGE REJECT.
 */
static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
			      unsigned long     irq_status,
			      unsigned short    execph)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;
	unsigned char  msg;
	unsigned char  msgtype;
	unsigned char  newlun;
	unsigned short command  = 0;
	int            msgclear = TRUE;
	long           new_sgtp;
	int            ret;

	/*
	 * read first message
	 *    Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the procedure
	 *    of Message-In have to be processed before sending back SCSI ACK.
	 */
	msg = nsp32_read1(base, SCSI_DATA_IN);
	data->msginbuf[(unsigned char)data->msgin_len] = msg;
	msgtype = data->msginbuf[0];
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR,
		  "enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x",
		  data->msgin_len, msg, msgtype);

	/*
	 * TODO: We need checking whether bus phase is message in?
	 */

	/*
	 * assert SCSI ACK
	 */
	nsp32_sack_assert(data);

	/*
	 * processing IDENTIFY
	 */
	if (msgtype & 0x80) {
		if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) {
			/* Invalid (non reselect) phase */
			goto reject;
		}

		newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */
		ret = nsp32_reselection(SCpnt, newlun);
		if (ret == TRUE) {
			goto restart;
		} else {
			goto reject;
		}
	}

	/*
	 * processing messages except for IDENTIFY
	 *
	 * TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO.
	 */
	switch (msgtype) {
	/*
	 * 1-byte message
	 */
	case COMMAND_COMPLETE:
	case DISCONNECT:
		/*
		 * These messages should not be occurred.
		 * They should be processed on AutoSCSI sequencer.
		 */
		nsp32_msg(KERN_WARNING,
			  "unexpected message of AutoSCSI MsgIn: 0x%x", msg);
		break;

	case RESTORE_POINTERS:
		/*
		 * AutoMsgIn03 is disabled, and HBA gets this message.
		 */
		if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) {
			unsigned int s_sacklen;

			s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
			if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) {
				nsp32_adjust_busfree(SCpnt, s_sacklen);
			} else {
				/* No need to rewrite SGT */
			}
		}
		data->cur_lunt->msgin03 = FALSE;

		/* Update with the new value */

		/* reset SACK/SavedACK counter (or ALL clear?) */
		nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

		/*
		 * set new sg pointer
		 */
		new_sgtp = data->cur_lunt->sglun_paddr +
			   (data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
		nsp32_write4(base, SGT_ADR, new_sgtp);

		break;

	case SAVE_POINTERS:
		/*
		 * These messages should not be occurred.
		 * They should be processed on AutoSCSI sequencer.
		 */
		nsp32_msg (KERN_WARNING,
			   "unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");
		break;

	case MESSAGE_REJECT:
		/* If previous message_out is sending SDTR, and get
		   message_reject from target, SDTR negotiation is failed */
		if (data->cur_target->sync_flag &
				(SDTR_INITIATOR | SDTR_TARGET)) {
			/*
			 * Current target is negotiating SDTR, but it's
			 * failed. Fall back to async transfer mode, and set
			 * SDTR_DONE.
			 */
			nsp32_set_async(data, data->cur_target);
			data->cur_target->sync_flag &= ~SDTR_INITIATOR;
			data->cur_target->sync_flag |= SDTR_DONE;
		}
		break;

	case LINKED_CMD_COMPLETE:
	case LINKED_FLG_CMD_COMPLETE:
		/* queue tag is not supported currently */
		nsp32_msg (KERN_WARNING,
			   "unsupported message: 0x%x", msgtype);
		break;

	case INITIATE_RECOVERY:
		/* staring ECA (Extended Contingent Allegiance) state. */
		/* This message is declined in SPI2 or later. */

		goto reject;

	/*
	 * 2-byte message
	 */
	case SIMPLE_QUEUE_TAG:
	case 0x23:
		/*
		 * 0x23: Ignore_Wide_Residue is not declared in scsi.h.
		 * No support is needed.
		 */
		if (data->msgin_len >= 1) {
			goto reject;
		}

		/* current position is 1-byte of 2 byte */
		msgclear = FALSE;

		break;

	/*
	 * extended message
	 */
	case EXTENDED_MESSAGE:
		if (data->msgin_len < 1) {
			/*
			 * Current position does not reach 2-byte
			 * (2-byte is extended message length).
			 */
			msgclear = FALSE;
			break;
		}

		if ((data->msginbuf[1] + 1) > data->msgin_len) {
			/*
			 * Current extended message has msginbuf[1] + 2
			 * (msgin_len starts counting from 0, so buf[1] + 1).
			 * If current message position is not finished,
			 * continue receiving message.
			 */
			msgclear = FALSE;
			break;
		}

		/*
		 * Reach here means regular length of each type of
		 * extended messages.
		 */
		switch (data->msginbuf[2]) {
		case EXTENDED_MODIFY_DATA_POINTER:
			/* TODO */
			goto reject; /* not implemented yet */
			break;

		case EXTENDED_SDTR:
			/*
			 * Exchange this message between initiator and target.
			 */
			if (data->msgin_len != EXTENDED_SDTR_LEN + 1) {
				/*
				 * received inappropriate message.
				 */
				goto reject;
				break;
			}

			nsp32_analyze_sdtr(SCpnt);

			break;

		case EXTENDED_EXTENDED_IDENTIFY:
			/* SCSI-I only, not supported. */
			goto reject; /* not implemented yet */
			break;

		case EXTENDED_WDTR:
			goto reject; /* not implemented yet */
			break;

		default:
			goto reject;
		}
		break;

	default:
		goto reject;
	}

 restart:
	if (msgclear == TRUE) {
		data->msgin_len = 0;

		/*
		 * If restarting AutoSCSI, but there are some message to out
		 * (msgout_len > 0), set AutoATN, and set SCSIMSGOUT as 0
		 * (MV_VALID = 0). When commandcontrol is written with
		 * AutoSCSI restart, at the same time MsgOutOccur should be
		 * happened (however, such situation is really possible...?).
		 */
		if (data->msgout_len > 0) {
			nsp32_write4(base, SCSI_MSG_OUT, 0);
			command |= AUTO_ATN;
		}

		/*
		 * restart AutoSCSI
		 * If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed.
		 */
		command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);

		/*
		 * If current msgin03 is TRUE, then flag on.
		 */
		if (data->cur_lunt->msgin03 == TRUE) {
			command |= AUTO_MSGIN_03;
		}
		data->cur_lunt->msgin03 = FALSE;
	} else {
		data->msgin_len++;
	}

	/*
	 * restart AutoSCSI
	 */
	nsp32_restart_autoscsi(SCpnt, command);

	/*
	 * wait SCSI REQ negate for REQ-ACK handshake
	 */
	nsp32_wait_req(data, NEGATE);

	/*
	 * negate SCSI ACK
	 */
	nsp32_sack_negate(data);

	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");

	return;

 reject:
	nsp32_msg(KERN_WARNING,
		  "invalid or unsupported MessageIn, rejected. "
		  "current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
		  msg, data->msgin_len, msgtype);
	nsp32_build_reject(SCpnt);
	data->msgin_len = 0;

	goto restart;
}

/*
 * Evaluate a received SDTR message (period in msginbuf[3], offset in
 * msginbuf[4]) and update the target's sync parameters — either
 * accepting/rejecting a target response to our SDTR, or answering a
 * target-initiated SDTR with our own parameters.
 */
static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data    *data       = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target     *target     = data->cur_target;
	nsp32_sync_table *synct;
	unsigned char     get_period = data->msginbuf[3];
	unsigned char     get_offset = data->msginbuf[4];
	int               entry;
	int               syncnum;

	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");

	/*
	 * NOTE(review): synct and syncnum are assigned here but never read
	 * again in this function — the code below uses data->synct directly.
	 */
	synct   = data->synct;
	syncnum = data->syncnum;

	/*
	 * If this inititor sent the SDTR message, then target responds SDTR,
	 * initiator SYNCREG, ACKWIDTH from SDTR parameter.
	 * Messages are not appropriate, then send back reject message.
	 * If initiator did not send the SDTR, but target sends SDTR,
	 * initiator calculator the appropriate parameter and send back SDTR.
	 */
	if (target->sync_flag & SDTR_INITIATOR) {
		/*
		 * Initiator sent SDTR, the target responds and
		 * send back negotiation SDTR.
		 */
		nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");

		target->sync_flag &= ~SDTR_INITIATOR;
		target->sync_flag |= SDTR_DONE;

		/*
		 * offset:
		 */
		if (get_offset > SYNC_OFFSET) {
			/*
			 * Negotiation is failed, the target send back
			 * unexpected offset value.
			 */
			goto reject;
		}

		if (get_offset == ASYNC_OFFSET) {
			/*
			 * Negotiation is succeeded, the target want
			 * to fall back into asynchronous transfer mode.
			 */
			goto async;
		}

		/*
		 * period:
		 *  Check whether sync period is too short. If too short,
		 *  fall back to async mode. If it's ok, then investigate
		 *  the received sync period. If sync period is acceptable
		 *  between sync table start_period and end_period, then
		 *  set this I_T nexus as sent offset and period.
		 *  If it's not acceptable, send back reject and fall back
		 *  to async mode.
		 */
		if (get_period < data->synct[0].period_num) {
			/*
			 * Negotiation is failed, the target send back
			 * unexpected period value.
			 */
			goto reject;
		}

		entry = nsp32_search_period_entry(data, target, get_period);

		if (entry < 0) {
			/*
			 * Target want to use long period which is not
			 * acceptable NinjaSCSI-32Bi/UDE.
			 */
			goto reject;
		}

		/*
		 * Set new sync table and offset in this I_T nexus.
		 */
		nsp32_set_sync_entry(data, target, entry, get_offset);
	} else {
		/* Target send SDTR to initiator. */
		nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");

		target->sync_flag |= SDTR_INITIATOR;

		/* offset: */
		if (get_offset > SYNC_OFFSET) {
			/* send back as SYNC_OFFSET */
			get_offset = SYNC_OFFSET;
		}

		/* period: */
		if (get_period < data->synct[0].period_num) {
			get_period = data->synct[0].period_num;
		}

		entry = nsp32_search_period_entry(data, target, get_period);

		if (get_offset == ASYNC_OFFSET || entry < 0) {
			nsp32_set_async(data, target);
			nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET);
		} else {
			nsp32_set_sync_entry(data, target, entry, get_offset);
			nsp32_build_sdtr(SCpnt, get_period, get_offset);
		}
	}

	target->period = get_period;
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
	return;

 reject:
	/*
	 * If the current message is unacceptable, send back to the target
	 * with reject message.
	 */
	nsp32_build_reject(SCpnt);

 async:
	nsp32_set_async(data, target); /* set as ASYNC transfer mode */

	target->period = 0;
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async");
	return;
}

/*
 * Search config entry number matched in sync_table from given
 * target and speed period value. If failed to search, return negative value.
*/ static int nsp32_search_period_entry(nsp32_hw_data *data, nsp32_target *target, unsigned char period) { int i; if (target->limit_entry >= data->syncnum) { nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!"); target->limit_entry = 0; } for (i = target->limit_entry; i < data->syncnum; i++) { if (period >= data->synct[i].start_period && period <= data->synct[i].end_period) { break; } } /* * Check given period value is over the sync_table value. * If so, return max value. */ if (i == data->syncnum) { i = -1; } return i; } /* * target <-> initiator use ASYNC transfer */ static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target) { unsigned char period = data->synct[target->limit_entry].period_num; target->offset = ASYNC_OFFSET; target->period = 0; target->syncreg = TO_SYNCREG(period, ASYNC_OFFSET); target->ackwidth = 0; target->sample_reg = 0; nsp32_dbg(NSP32_DEBUG_SYNC, "set async"); } /* * target <-> initiator use maximum SYNC transfer */ static void nsp32_set_max_sync(nsp32_hw_data *data, nsp32_target *target, unsigned char *period, unsigned char *offset) { unsigned char period_num, ackwidth; period_num = data->synct[target->limit_entry].period_num; *period = data->synct[target->limit_entry].start_period; ackwidth = data->synct[target->limit_entry].ackwidth; *offset = SYNC_OFFSET; target->syncreg = TO_SYNCREG(period_num, *offset); target->ackwidth = ackwidth; target->offset = *offset; target->sample_reg = 0; /* disable SREQ sampling */ } /* * target <-> initiator use entry number speed */ static void nsp32_set_sync_entry(nsp32_hw_data *data, nsp32_target *target, int entry, unsigned char offset) { unsigned char period, ackwidth, sample_rate; period = data->synct[entry].period_num; ackwidth = data->synct[entry].ackwidth; offset = offset; sample_rate = data->synct[entry].sample_rate; target->syncreg = TO_SYNCREG(period, offset); target->ackwidth = ackwidth; target->offset = offset; target->sample_reg = sample_rate | SAMPLING_ENABLE; 
nsp32_dbg(NSP32_DEBUG_SYNC, "set sync"); } /* * It waits until SCSI REQ becomes assertion or negation state. * * Note: If nsp32_msgin_occur is called, we asserts SCSI ACK. Then * connected target responds SCSI REQ negation. We have to wait * SCSI REQ becomes negation in order to negate SCSI ACK signal for * REQ-ACK handshake. */ static void nsp32_wait_req(nsp32_hw_data *data, int state) { unsigned int base = data->BaseAddress; int wait_time = 0; unsigned char bus, req_bit; if (!((state == ASSERT) || (state == NEGATE))) { nsp32_msg(KERN_ERR, "unknown state designation"); } /* REQ is BIT(5) */ req_bit = (state == ASSERT ? BUSMON_REQ : 0); do { bus = nsp32_read1(base, SCSI_BUS_MONITOR); if ((bus & BUSMON_REQ) == req_bit) { nsp32_dbg(NSP32_DEBUG_WAIT, "wait_time: %d", wait_time); return; } udelay(1); wait_time++; } while (wait_time < REQSACK_TIMEOUT_TIME); nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", req_bit); } /* * It waits until SCSI SACK becomes assertion or negation state. */ static void nsp32_wait_sack(nsp32_hw_data *data, int state) { unsigned int base = data->BaseAddress; int wait_time = 0; unsigned char bus, ack_bit; if (!((state == ASSERT) || (state == NEGATE))) { nsp32_msg(KERN_ERR, "unknown state designation"); } /* ACK is BIT(4) */ ack_bit = (state == ASSERT ? BUSMON_ACK : 0); do { bus = nsp32_read1(base, SCSI_BUS_MONITOR); if ((bus & BUSMON_ACK) == ack_bit) { nsp32_dbg(NSP32_DEBUG_WAIT, "wait_time: %d", wait_time); return; } udelay(1); wait_time++; } while (wait_time < REQSACK_TIMEOUT_TIME); nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", ack_bit); } /* * assert SCSI ACK * * Note: SCSI ACK assertion needs with ACKENB=1, AUTODIRECTION=1. 
*/ static void nsp32_sack_assert(nsp32_hw_data *data) { unsigned int base = data->BaseAddress; unsigned char busctrl; busctrl = nsp32_read1(base, SCSI_BUS_CONTROL); busctrl |= (BUSCTL_ACK | AUTODIRECTION | ACKENB); nsp32_write1(base, SCSI_BUS_CONTROL, busctrl); } /* * negate SCSI ACK */ static void nsp32_sack_negate(nsp32_hw_data *data) { unsigned int base = data->BaseAddress; unsigned char busctrl; busctrl = nsp32_read1(base, SCSI_BUS_CONTROL); busctrl &= ~BUSCTL_ACK; nsp32_write1(base, SCSI_BUS_CONTROL, busctrl); } /* * Note: n_io_port is defined as 0x7f because I/O register port is * assigned as: * 0x800-0x8ff: memory mapped I/O port * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly) * 0xc00-0xfff: CardBus status registers */ static int nsp32_detect(struct pci_dev *pdev) { struct Scsi_Host *host; /* registered host structure */ struct resource *res; nsp32_hw_data *data; int ret; int i, j; nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); /* * register this HBA as SCSI device */ host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data)); if (host == NULL) { nsp32_msg (KERN_ERR, "failed to scsi register"); goto err; } /* * set nsp32_hw_data */ data = (nsp32_hw_data *)host->hostdata; memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data)); host->irq = data->IrqNumber; host->io_port = data->BaseAddress; host->unique_id = data->BaseAddress; host->n_io_port = data->NumAddress; host->base = (unsigned long)data->MmioAddress; data->Host = host; spin_lock_init(&(data->Lock)); data->cur_lunt = NULL; data->cur_target = NULL; /* * Bus master transfer mode is supported currently. */ data->trans_method = NSP32_TRANSFER_BUSMASTER; /* * Set clock div, CLOCK_4 (HBA has own external clock, and * dividing * 100ns/4). * Currently CLOCK_4 has only tested, not for CLOCK_2/PCICLK yet. */ data->clock = CLOCK_4; /* * Select appropriate nsp32_sync_table and set I_CLOCKDIV. */ switch (data->clock) { case CLOCK_4: /* If data->clock is CLOCK_4, then select 40M sync table. 
*/ data->synct = nsp32_sync_table_40M; data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M); break; case CLOCK_2: /* If data->clock is CLOCK_2, then select 20M sync table. */ data->synct = nsp32_sync_table_20M; data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M); break; case PCICLK: /* If data->clock is PCICLK, then select pci sync table. */ data->synct = nsp32_sync_table_pci; data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci); break; default: nsp32_msg(KERN_WARNING, "Invalid clock div is selected, set CLOCK_4."); /* Use default value CLOCK_4 */ data->clock = CLOCK_4; data->synct = nsp32_sync_table_40M; data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M); } /* * setup nsp32_lunt */ /* * setup DMA */ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { nsp32_msg (KERN_ERR, "failed to set PCI DMA mask"); goto scsi_unregister; } /* * allocate autoparam DMA resource. */ data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr)); if (data->autoparam == NULL) { nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); goto scsi_unregister; } /* * allocate scatter-gather DMA resource. 
*/ data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE, &(data->sg_paddr)); if (data->sg_list == NULL) { nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); goto free_autoparam; } for (i = 0; i < ARRAY_SIZE(data->lunt); i++) { for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) { int offset = i * ARRAY_SIZE(data->lunt[0]) + j; nsp32_lunt tmp = { .SCpnt = NULL, .save_datp = 0, .msgin03 = FALSE, .sg_num = 0, .cur_entry = 0, .sglun = &(data->sg_list[offset]), .sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)), }; data->lunt[i][j] = tmp; } } /* * setup target */ for (i = 0; i < ARRAY_SIZE(data->target); i++) { nsp32_target *target = &(data->target[i]); target->limit_entry = 0; target->sync_flag = 0; nsp32_set_async(data, target); } /* * EEPROM check */ ret = nsp32_getprom_param(data); if (ret == FALSE) { data->resettime = 3; /* default 3 */ } /* * setup HBA */ nsp32hw_init(data); snprintf(data->info_str, sizeof(data->info_str), "NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x", host->irq, host->io_port, host->n_io_port); /* * SCSI bus reset * * Note: It's important to reset SCSI bus in initialization phase. * NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR when * system is coming up, so SCSI devices connected to HBA is set as * un-asynchronous mode. It brings the merit that this HBA is * ready to start synchronous transfer without any preparation, * but we are difficult to control transfer speed. In addition, * it prevents device transfer speed from effecting EEPROM start-up * SDTR. NinjaSCSI-32Bi/UDE has the feature if EEPROM is set as * Auto Mode, then FAST-10M is selected when SCSI devices are * connected same or more than 4 devices. It should be avoided * depending on this specification. Thus, resetting the SCSI bus * restores all connected SCSI devices to asynchronous mode, then * this driver set SDTR safely later, and we can control all SCSI * device transfer mode. 
*/ nsp32_do_bus_reset(data); ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data); if (ret < 0) { nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 " "SCSI PCI controller. Interrupt: %d", host->irq); goto free_sg_list; } /* * PCI IO register */ res = request_region(host->io_port, host->n_io_port, "nsp32"); if (res == NULL) { nsp32_msg(KERN_ERR, "I/O region 0x%lx+0x%lx is already used", data->BaseAddress, data->NumAddress); goto free_irq; } ret = scsi_add_host(host, &pdev->dev); if (ret) { nsp32_msg(KERN_ERR, "failed to add scsi host"); goto free_region; } scsi_scan_host(host); pci_set_drvdata(pdev, host); return 0; free_region: release_region(host->io_port, host->n_io_port); free_irq: free_irq(host->irq, data); free_sg_list: pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE, data->sg_list, data->sg_paddr); free_autoparam: pci_free_consistent(pdev, sizeof(nsp32_autoparam), data->autoparam, data->auto_paddr); scsi_unregister: scsi_host_put(host); err: return 1; } static int nsp32_release(struct Scsi_Host *host) { nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; if (data->autoparam) { pci_free_consistent(data->Pci, sizeof(nsp32_autoparam), data->autoparam, data->auto_paddr); } if (data->sg_list) { pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE, data->sg_list, data->sg_paddr); } if (host->irq) { free_irq(host->irq, data); } if (host->io_port && host->n_io_port) { release_region(host->io_port, host->n_io_port); } if (data->MmioAddress) { iounmap(data->MmioAddress); } return 0; } static const char *nsp32_info(struct Scsi_Host *shpnt) { nsp32_hw_data *data = (nsp32_hw_data *)shpnt->hostdata; return data->info_str; } /**************************************************************************** * error handler */ static int nsp32_eh_abort(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; nsp32_msg(KERN_WARNING, "abort"); if 
(data->cur_lunt->SCpnt == NULL) { nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed"); return FAILED; } if (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET)) { /* reset SDTR negotiation */ data->cur_target->sync_flag = 0; nsp32_set_async(data, data->cur_target); } nsp32_write2(base, TRANSFER_CONTROL, 0); nsp32_write2(base, BM_CNT, 0); SCpnt->result = DID_ABORT << 16; nsp32_scsi_done(SCpnt); nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success"); return SUCCESS; } static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; unsigned int base = SCpnt->device->host->io_port; spin_lock_irq(SCpnt->device->host->host_lock); nsp32_msg(KERN_INFO, "Bus Reset"); nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt); nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); nsp32_do_bus_reset(data); nsp32_write2(base, IRQ_CONTROL, 0); spin_unlock_irq(SCpnt->device->host->host_lock); return SUCCESS; /* SCSI bus reset is succeeded at any time. 
*/ } static void nsp32_do_bus_reset(nsp32_hw_data *data) { unsigned int base = data->BaseAddress; unsigned short intrdat; int i; nsp32_dbg(NSP32_DEBUG_BUSRESET, "in"); /* * stop all transfer * clear TRANSFERCONTROL_BM_START * clear counter */ nsp32_write2(base, TRANSFER_CONTROL, 0); nsp32_write4(base, BM_CNT, 0); nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK); /* * fall back to asynchronous transfer mode * initialize SDTR negotiation flag */ for (i = 0; i < ARRAY_SIZE(data->target); i++) { nsp32_target *target = &data->target[i]; target->sync_flag = 0; nsp32_set_async(data, target); } /* * reset SCSI bus */ nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST); mdelay(RESET_HOLD_TIME / 1000); nsp32_write1(base, SCSI_BUS_CONTROL, 0); for(i = 0; i < 5; i++) { intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */ nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat); } data->CurrentSC = NULL; } static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt) { struct Scsi_Host *host = SCpnt->device->host; unsigned int base = SCpnt->device->host->io_port; nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; nsp32_msg(KERN_INFO, "Host Reset"); nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%x", SCpnt); spin_lock_irq(SCpnt->device->host->host_lock); nsp32hw_init(data); nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK); nsp32_do_bus_reset(data); nsp32_write2(base, IRQ_CONTROL, 0); spin_unlock_irq(SCpnt->device->host->host_lock); return SUCCESS; /* Host reset is succeeded at any time. */ } /************************************************************************** * EEPROM handler */ /* * getting EEPROM parameter */ static int nsp32_getprom_param(nsp32_hw_data *data) { int vendor = data->pci_devid->vendor; int device = data->pci_devid->device; int ret, val, i; /* * EEPROM checking. 
*/ ret = nsp32_prom_read(data, 0x7e); if (ret != 0x55) { nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret); return FALSE; } ret = nsp32_prom_read(data, 0x7f); if (ret != 0xaa) { nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret); return FALSE; } /* * check EEPROM type */ if (vendor == PCI_VENDOR_ID_WORKBIT && device == PCI_DEVICE_ID_WORKBIT_STANDARD) { ret = nsp32_getprom_c16(data); } else if (vendor == PCI_VENDOR_ID_WORKBIT && device == PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC) { ret = nsp32_getprom_at24(data); } else if (vendor == PCI_VENDOR_ID_WORKBIT && device == PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO ) { ret = nsp32_getprom_at24(data); } else { nsp32_msg(KERN_WARNING, "Unknown EEPROM"); ret = FALSE; } /* for debug : SPROM data full checking */ for (i = 0; i <= 0x1f; i++) { val = nsp32_prom_read(data, i); nsp32_dbg(NSP32_DEBUG_EEPROM, "rom address 0x%x : 0x%x", i, val); } return ret; } /* * AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map: * * ROMADDR * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6) * Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M * 0x07 : HBA Synchronous Transfer Period * Value 0: AutoSync, 1: Manual Setting * 0x08 - 0x0f : Not Used? (0x0) * 0x10 : Bus Termination * Value 0: Auto[ON], 1: ON, 2: OFF * 0x11 : Not Used? (0) * 0x12 : Bus Reset Delay Time (0x03) * 0x13 : Bootable CD Support * Value 0: Disable, 1: Enable * 0x14 : Device Scan * Bit 7 6 5 4 3 2 1 0 * | <-----------------> * | SCSI ID: Value 0: Skip, 1: YES * |-> Value 0: ALL scan, Value 1: Manual * 0x15 - 0x1b : Not Used? (0) * 0x1c : Constant? (0x01) (clock div?) * 0x1d - 0x7c : Not Used (0xff) * 0x7d : Not Used? (0xff) * 0x7e : Constant (0x55), Validity signature * 0x7f : Constant (0xaa), Validity signature */ static int nsp32_getprom_at24(nsp32_hw_data *data) { int ret, i; int auto_sync; nsp32_target *target; int entry; /* * Reset time which is designated by EEPROM. * * TODO: Not used yet. 
*/ data->resettime = nsp32_prom_read(data, 0x12); /* * HBA Synchronous Transfer Period * * Note: auto_sync = 0: auto, 1: manual. Ninja SCSI HBA spec says * that if auto_sync is 0 (auto), and connected SCSI devices are * same or lower than 3, then transfer speed is set as ULTRA-20M. * On the contrary if connected SCSI devices are same or higher * than 4, then transfer speed is set as FAST-10M. * * I break this rule. The number of connected SCSI devices are * only ignored. If auto_sync is 0 (auto), then transfer speed is * forced as ULTRA-20M. */ ret = nsp32_prom_read(data, 0x07); switch (ret) { case 0: auto_sync = TRUE; break; case 1: auto_sync = FALSE; break; default: nsp32_msg(KERN_WARNING, "Unsupported Auto Sync mode. Fall back to manual mode."); auto_sync = TRUE; } if (trans_mode == ULTRA20M_MODE) { auto_sync = TRUE; } /* * each device Synchronous Transfer Period */ for (i = 0; i < NSP32_HOST_SCSIID; i++) { target = &data->target[i]; if (auto_sync == TRUE) { target->limit_entry = 0; /* set as ULTRA20M */ } else { ret = nsp32_prom_read(data, i); entry = nsp32_search_period_entry(data, target, ret); if (entry < 0) { /* search failed... set maximum speed */ entry = 0; } target->limit_entry = entry; } } return TRUE; } /* * C16 110 (I-O Data: SC-NBD) data map: * * ROMADDR * 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6) * Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC * 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync) * 0x08 - 0x0f : Not Used? (0x0) * 0x10 : Transfer Mode * Value 0: PIO, 1: Busmater * 0x11 : Bus Reset Delay Time (0x00-0x20) * 0x12 : Bus Termination * Value 0: Disable, 1: Enable * 0x13 - 0x19 : Disconnection * Value 0: Disable, 1: Enable * 0x1a - 0x7c : Not Used? (0) * 0x7d : Not Used? 
(0xf8) * 0x7e : Constant (0x55), Validity signature * 0x7f : Constant (0xaa), Validity signature */ static int nsp32_getprom_c16(nsp32_hw_data *data) { int ret, i; nsp32_target *target; int entry, val; /* * Reset time which is designated by EEPROM. * * TODO: Not used yet. */ data->resettime = nsp32_prom_read(data, 0x11); /* * each device Synchronous Transfer Period */ for (i = 0; i < NSP32_HOST_SCSIID; i++) { target = &data->target[i]; ret = nsp32_prom_read(data, i); switch (ret) { case 0: /* 20MB/s */ val = 0x0c; break; case 1: /* 10MB/s */ val = 0x19; break; case 2: /* 5MB/s */ val = 0x32; break; case 3: /* ASYNC */ val = 0x00; break; default: /* default 20MB/s */ val = 0x0c; break; } entry = nsp32_search_period_entry(data, target, val); if (entry < 0 || trans_mode == ULTRA20M_MODE) { /* search failed... set maximum speed */ entry = 0; } target->limit_entry = entry; } return TRUE; } /* * Atmel AT24C01A (drived in 5V) serial EEPROM routines */ static int nsp32_prom_read(nsp32_hw_data *data, int romaddr) { int i, val; /* start condition */ nsp32_prom_start(data); /* device address */ nsp32_prom_write_bit(data, 1); /* 1 */ nsp32_prom_write_bit(data, 0); /* 0 */ nsp32_prom_write_bit(data, 1); /* 1 */ nsp32_prom_write_bit(data, 0); /* 0 */ nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */ nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */ nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */ /* R/W: W for dummy write */ nsp32_prom_write_bit(data, 0); /* ack */ nsp32_prom_write_bit(data, 0); /* word address */ for (i = 7; i >= 0; i--) { nsp32_prom_write_bit(data, ((romaddr >> i) & 1)); } /* ack */ nsp32_prom_write_bit(data, 0); /* start condition */ nsp32_prom_start(data); /* device address */ nsp32_prom_write_bit(data, 1); /* 1 */ nsp32_prom_write_bit(data, 0); /* 0 */ nsp32_prom_write_bit(data, 1); /* 1 */ nsp32_prom_write_bit(data, 0); /* 0 */ nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */ nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */ nsp32_prom_write_bit(data, 0); /* 
A0: 0 (GND) */ /* R/W: R */ nsp32_prom_write_bit(data, 1); /* ack */ nsp32_prom_write_bit(data, 0); /* data... */ val = 0; for (i = 7; i >= 0; i--) { val += (nsp32_prom_read_bit(data) << i); } /* no ack */ nsp32_prom_write_bit(data, 1); /* stop condition */ nsp32_prom_stop(data); return val; } static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val) { int base = data->BaseAddress; int tmp; tmp = nsp32_index_read1(base, SERIAL_ROM_CTL); if (val == 0) { tmp &= ~bit; } else { tmp |= bit; } nsp32_index_write1(base, SERIAL_ROM_CTL, tmp); udelay(10); } static int nsp32_prom_get(nsp32_hw_data *data, int bit) { int base = data->BaseAddress; int tmp, ret; if (bit != SDA) { nsp32_msg(KERN_ERR, "return value is not appropriate"); return 0; } tmp = nsp32_index_read1(base, SERIAL_ROM_CTL) & bit; if (tmp == 0) { ret = 0; } else { ret = 1; } udelay(10); return ret; } static void nsp32_prom_start (nsp32_hw_data *data) { /* start condition */ nsp32_prom_set(data, SCL, 1); nsp32_prom_set(data, SDA, 1); nsp32_prom_set(data, ENA, 1); /* output mode */ nsp32_prom_set(data, SDA, 0); /* keeping SCL=1 and transiting * SDA 1->0 is start condition */ nsp32_prom_set(data, SCL, 0); } static void nsp32_prom_stop (nsp32_hw_data *data) { /* stop condition */ nsp32_prom_set(data, SCL, 1); nsp32_prom_set(data, SDA, 0); nsp32_prom_set(data, ENA, 1); /* output mode */ nsp32_prom_set(data, SDA, 1); nsp32_prom_set(data, SCL, 0); } static void nsp32_prom_write_bit(nsp32_hw_data *data, int val) { /* write */ nsp32_prom_set(data, SDA, val); nsp32_prom_set(data, SCL, 1 ); nsp32_prom_set(data, SCL, 0 ); } static int nsp32_prom_read_bit(nsp32_hw_data *data) { int val; /* read */ nsp32_prom_set(data, ENA, 0); /* input mode */ nsp32_prom_set(data, SCL, 1); val = nsp32_prom_get(data, SDA); nsp32_prom_set(data, SCL, 0); nsp32_prom_set(data, ENA, 1); /* output mode */ return val; } /************************************************************************** * Power Management */ #ifdef CONFIG_PM /* 
Device suspended */ static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%ld, slot=%s, host=0x%p", pdev, state, pci_name(pdev), host); pci_save_state (pdev); pci_disable_device (pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } /* Device woken up */ static int nsp32_resume(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata; unsigned short reg; nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host); pci_set_power_state(pdev, PCI_D0); pci_enable_wake (pdev, PCI_D0, 0); pci_restore_state (pdev); reg = nsp32_read2(data->BaseAddress, INDEX_REG); nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg); if (reg == 0xffff) { nsp32_msg(KERN_INFO, "missing device. abort resume."); return 0; } nsp32hw_init (data); nsp32_do_bus_reset(data); nsp32_msg(KERN_INFO, "resume success"); return 0; } #endif /************************************************************************ * PCI/Cardbus probe/remove routine */ static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int ret; nsp32_hw_data *data = &nsp32_data_base; nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); ret = pci_enable_device(pdev); if (ret) { nsp32_msg(KERN_ERR, "failed to enable pci device"); return ret; } data->Pci = pdev; data->pci_devid = id; data->IrqNumber = pdev->irq; data->BaseAddress = pci_resource_start(pdev, 0); data->NumAddress = pci_resource_len (pdev, 0); data->MmioAddress = pci_ioremap_bar(pdev, 1); data->MmioLength = pci_resource_len (pdev, 1); pci_set_master(pdev); ret = nsp32_detect(pdev); nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s", pdev->irq, data->MmioAddress, data->MmioLength, pci_name(pdev), nsp32_model[id->driver_data]); nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret); return ret; } static void 
nsp32_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); scsi_remove_host(host); nsp32_release(host); scsi_host_put(host); } static struct pci_driver nsp32_driver = { .name = "nsp32", .id_table = nsp32_pci_table, .probe = nsp32_probe, .remove = nsp32_remove, #ifdef CONFIG_PM .suspend = nsp32_suspend, .resume = nsp32_resume, #endif }; /********************************************************************* * Moule entry point */ static int __init init_nsp32(void) { nsp32_msg(KERN_INFO, "loading..."); return pci_register_driver(&nsp32_driver); } static void __exit exit_nsp32(void) { nsp32_msg(KERN_INFO, "unloading..."); pci_unregister_driver(&nsp32_driver); } module_init(init_nsp32); module_exit(exit_nsp32); /* end */
gpl-2.0
budi79/deka-kernel-msm7x30-3.0
drivers/mtd/maps/sa1100-flash.c
2784
9712
/* * Flash memory access on SA11x0 based devices * * (C) 2000 Nicolas Pitre <nico@fluxnic.net> */ #include <linux/module.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/concat.h> #include <mach/hardware.h> #include <asm/sizes.h> #include <asm/mach/flash.h> #if 0 /* * This is here for documentation purposes only - until these people * submit their machine types. It will be gone January 2005. */ static struct mtd_partition consus_partitions[] = { { .name = "Consus boot firmware", .offset = 0, .size = 0x00040000, .mask_flags = MTD_WRITABLE, /* force read-only */ }, { .name = "Consus kernel", .offset = 0x00040000, .size = 0x00100000, .mask_flags = 0, }, { .name = "Consus disk", .offset = 0x00140000, /* The rest (up to 16M) for jffs. We could put 0 and make it find the size automatically, but right now i have 32 megs. jffs will use all 32 megs if given the chance, and this leads to horrible problems when you try to re-flash the image because blob won't erase the whole partition. 
*/ .size = 0x01000000 - 0x00140000, .mask_flags = 0, }, { /* this disk is a secondary disk, which can be used as needed, for simplicity, make it the size of the other consus partition, although realistically it could be the remainder of the disk (depending on the file system used) */ .name = "Consus disk2", .offset = 0x01000000, .size = 0x01000000 - 0x00140000, .mask_flags = 0, } }; /* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */ static struct mtd_partition frodo_partitions[] = { { .name = "bootloader", .size = 0x00040000, .offset = 0x00000000, .mask_flags = MTD_WRITEABLE }, { .name = "bootloader params", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE }, { .name = "kernel", .size = 0x00100000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE }, { .name = "ramdisk", .size = 0x00400000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE }, { .name = "file system", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND } }; static struct mtd_partition jornada56x_partitions[] = { { .name = "bootldr", .size = 0x00040000, .offset = 0, .mask_flags = MTD_WRITEABLE, }, { .name = "rootfs", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static void jornada56x_set_vpp(int vpp) { if (vpp) GPSR = GPIO_GPIO26; else GPCR = GPIO_GPIO26; GPDR |= GPIO_GPIO26; } /* * Machine Phys Size set_vpp * Consus : SA1100_CS0_PHYS SZ_32M * Frodo : SA1100_CS0_PHYS SZ_32M * Jornada56x: SA1100_CS0_PHYS SZ_32M jornada56x_set_vpp */ #endif struct sa_subdev_info { char name[16]; struct map_info map; struct mtd_info *mtd; struct flash_platform_data *plat; }; struct sa_info { struct mtd_partition *parts; struct mtd_info *mtd; int num_subdev; unsigned int nr_parts; struct sa_subdev_info subdev[0]; }; static void sa1100_set_vpp(struct map_info *map, int on) { struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); subdev->plat->set_vpp(on); } static void sa1100_destroy_subdev(struct sa_subdev_info *subdev) { if 
(subdev->mtd) map_destroy(subdev->mtd); if (subdev->map.virt) iounmap(subdev->map.virt); release_mem_region(subdev->map.phys, subdev->map.size); } static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *res) { unsigned long phys; unsigned int size; int ret; phys = res->start; size = res->end - phys + 1; /* * Retrieve the bankwidth from the MSC registers. * We currently only implement CS0 and CS1 here. */ switch (phys) { default: printk(KERN_WARNING "SA1100 flash: unknown base address " "0x%08lx, assuming CS0\n", phys); case SA1100_CS0_PHYS: subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4; break; case SA1100_CS1_PHYS: subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4; break; } if (!request_mem_region(phys, size, subdev->name)) { ret = -EBUSY; goto out; } if (subdev->plat->set_vpp) subdev->map.set_vpp = sa1100_set_vpp; subdev->map.phys = phys; subdev->map.size = size; subdev->map.virt = ioremap(phys, size); if (!subdev->map.virt) { ret = -ENOMEM; goto err; } simple_map_init(&subdev->map); /* * Now let's probe for the actual flash. Do it here since * specific machine settings might have been set above. 
*/ subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map); if (subdev->mtd == NULL) { ret = -ENXIO; goto err; } subdev->mtd->owner = THIS_MODULE; printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n", phys, (unsigned)(subdev->mtd->size >> 20), subdev->map.bankwidth * 8); return 0; err: sa1100_destroy_subdev(subdev); out: return ret; } static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *plat) { int i; if (info->mtd) { mtd_device_unregister(info->mtd); if (info->mtd != info->subdev[0].mtd) mtd_concat_destroy(info->mtd); } kfree(info->parts); for (i = info->num_subdev - 1; i >= 0; i--) sa1100_destroy_subdev(&info->subdev[i]); kfree(info); if (plat->exit) plat->exit(); } static struct sa_info *__devinit sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) { struct sa_info *info; int nr, size, i, ret = 0; /* * Count number of devices. */ for (nr = 0; ; nr++) if (!platform_get_resource(pdev, IORESOURCE_MEM, nr)) break; if (nr == 0) { ret = -ENODEV; goto out; } size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr; /* * Allocate the map_info structs in one go. */ info = kzalloc(size, GFP_KERNEL); if (!info) { ret = -ENOMEM; goto out; } if (plat->init) { ret = plat->init(); if (ret) goto err; } /* * Claim and then map the memory regions. */ for (i = 0; i < nr; i++) { struct sa_subdev_info *subdev = &info->subdev[i]; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) break; subdev->map.name = subdev->name; sprintf(subdev->name, "%s-%d", plat->name, i); subdev->plat = plat; ret = sa1100_probe_subdev(subdev, res); if (ret) break; } info->num_subdev = i; /* * ENXIO is special. It means we didn't find a chip when we probed. */ if (ret != 0 && !(ret == -ENXIO && info->num_subdev > 0)) goto err; /* * If we found one device, don't bother with concat support. If * we found multiple devices, use concat if we have it available, * otherwise fail. 
Either way, it'll be called "sa1100". */ if (info->num_subdev == 1) { strcpy(info->subdev[0].name, plat->name); info->mtd = info->subdev[0].mtd; ret = 0; } else if (info->num_subdev > 1) { struct mtd_info *cdev[nr]; /* * We detected multiple devices. Concatenate them together. */ for (i = 0; i < info->num_subdev; i++) cdev[i] = info->subdev[i].mtd; info->mtd = mtd_concat_create(cdev, info->num_subdev, plat->name); if (info->mtd == NULL) ret = -ENXIO; } if (ret == 0) return info; err: sa1100_destroy(info, plat); out: return ERR_PTR(ret); } static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; static int __devinit sa1100_mtd_probe(struct platform_device *pdev) { struct flash_platform_data *plat = pdev->dev.platform_data; struct mtd_partition *parts; const char *part_type = NULL; struct sa_info *info; int err, nr_parts = 0; if (!plat) return -ENODEV; info = sa1100_setup_mtd(pdev, plat); if (IS_ERR(info)) { err = PTR_ERR(info); goto out; } /* * Partition selection stuff. */ nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0); if (nr_parts > 0) { info->parts = parts; part_type = "dynamic"; } else { parts = plat->parts; nr_parts = plat->nr_parts; part_type = "static"; } if (nr_parts == 0) printk(KERN_NOTICE "SA1100 flash: no partition info " "available, registering whole flash\n"); else printk(KERN_NOTICE "SA1100 flash: using %s partition " "definition\n", part_type); mtd_device_register(info->mtd, parts, nr_parts); info->nr_parts = nr_parts; platform_set_drvdata(pdev, info); err = 0; out: return err; } static int __exit sa1100_mtd_remove(struct platform_device *pdev) { struct sa_info *info = platform_get_drvdata(pdev); struct flash_platform_data *plat = pdev->dev.platform_data; platform_set_drvdata(pdev, NULL); sa1100_destroy(info, plat); return 0; } #ifdef CONFIG_PM static void sa1100_mtd_shutdown(struct platform_device *dev) { struct sa_info *info = platform_get_drvdata(dev); if (info && info->mtd->suspend(info->mtd) == 0) 
info->mtd->resume(info->mtd); } #else #define sa1100_mtd_shutdown NULL #endif static struct platform_driver sa1100_mtd_driver = { .probe = sa1100_mtd_probe, .remove = __exit_p(sa1100_mtd_remove), .shutdown = sa1100_mtd_shutdown, .driver = { .name = "sa1100-mtd", .owner = THIS_MODULE, }, }; static int __init sa1100_mtd_init(void) { return platform_driver_register(&sa1100_mtd_driver); } static void __exit sa1100_mtd_exit(void) { platform_driver_unregister(&sa1100_mtd_driver); } module_init(sa1100_mtd_init); module_exit(sa1100_mtd_exit); MODULE_AUTHOR("Nicolas Pitre"); MODULE_DESCRIPTION("SA1100 CFI map driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sa1100-mtd");
gpl-2.0
prashmohan/lxc-fork
drivers/pps/clients/pps_parport.c
3040
6427
/* * pps_parport.c -- kernel parallel port PPS client * * * Copyright (C) 2009 Alexander Gordeev <lasaine@lvk.cs.msu.su> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * TODO: * implement echo over SEL pin */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/irqnr.h> #include <linux/time.h> #include <linux/parport.h> #include <linux/pps_kernel.h> #define DRVDESC "parallel port PPS client" /* module parameters */ #define CLEAR_WAIT_MAX 100 #define CLEAR_WAIT_MAX_ERRORS 5 static unsigned int clear_wait = 100; MODULE_PARM_DESC(clear_wait, "Maximum number of port reads when polling for signal clear," " zero turns clear edge capture off entirely"); module_param(clear_wait, uint, 0); /* internal per port structure */ struct pps_client_pp { struct pardevice *pardev; /* parport device */ struct pps_device *pps; /* PPS device */ unsigned int cw; /* port clear timeout */ unsigned int cw_err; /* number of timeouts */ }; static inline int signal_is_set(struct parport *port) { return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0; } /* parport interrupt handler */ static void parport_irq(void *handle) { struct pps_event_time ts_assert, ts_clear; struct pps_client_pp *dev = handle; struct parport *port = dev->pardev->port; unsigned int i; unsigned long 
flags; /* first of all we get the time stamp... */ pps_get_ts(&ts_assert); if (dev->cw == 0) /* clear edge capture disabled */ goto out_assert; /* try capture the clear edge */ /* We have to disable interrupts here. The idea is to prevent * other interrupts on the same processor to introduce random * lags while polling the port. Reading from IO port is known * to take approximately 1us while other interrupt handlers can * take much more potentially. * * Interrupts won't be disabled for a long time because the * number of polls is limited by clear_wait parameter which is * kept rather low. So it should never be an issue. */ local_irq_save(flags); /* check the signal (no signal means the pulse is lost this time) */ if (!signal_is_set(port)) { local_irq_restore(flags); dev_err(dev->pps->dev, "lost the signal\n"); goto out_assert; } /* poll the port until the signal is unset */ for (i = dev->cw; i; i--) if (!signal_is_set(port)) { pps_get_ts(&ts_clear); local_irq_restore(flags); dev->cw_err = 0; goto out_both; } local_irq_restore(flags); /* timeout */ dev->cw_err++; if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) { dev_err(dev->pps->dev, "disabled clear edge capture after %d" " timeouts\n", dev->cw_err); dev->cw = 0; dev->cw_err = 0; } out_assert: /* fire assert event */ pps_event(dev->pps, &ts_assert, PPS_CAPTUREASSERT, NULL); return; out_both: /* fire assert event */ pps_event(dev->pps, &ts_assert, PPS_CAPTUREASSERT, NULL); /* fire clear event */ pps_event(dev->pps, &ts_clear, PPS_CAPTURECLEAR, NULL); return; } /* the PPS echo function */ static void pps_echo(struct pps_device *pps, int event, void *data) { dev_info(pps->dev, "echo %s %s\n", event & PPS_CAPTUREASSERT ? "assert" : "", event & PPS_CAPTURECLEAR ? 
"clear" : ""); } static void parport_attach(struct parport *port) { struct pps_client_pp *device; struct pps_source_info info = { .name = KBUILD_MODNAME, .path = "", .mode = PPS_CAPTUREBOTH | \ PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \ PPS_ECHOASSERT | PPS_ECHOCLEAR | \ PPS_CANWAIT | PPS_TSFMT_TSPEC, .echo = pps_echo, .owner = THIS_MODULE, .dev = NULL }; device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL); if (!device) { pr_err("memory allocation failed, not attaching\n"); return; } device->pardev = parport_register_device(port, KBUILD_MODNAME, NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device); if (!device->pardev) { pr_err("couldn't register with %s\n", port->name); goto err_free; } if (parport_claim_or_block(device->pardev) < 0) { pr_err("couldn't claim %s\n", port->name); goto err_unregister_dev; } device->pps = pps_register_source(&info, PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR); if (device->pps == NULL) { pr_err("couldn't register PPS source\n"); goto err_release_dev; } device->cw = clear_wait; port->ops->enable_irq(port); pr_info("attached to %s\n", port->name); return; err_release_dev: parport_release(device->pardev); err_unregister_dev: parport_unregister_device(device->pardev); err_free: kfree(device); } static void parport_detach(struct parport *port) { struct pardevice *pardev = port->cad; struct pps_client_pp *device; /* FIXME: oooh, this is ugly! 
*/ if (strcmp(pardev->name, KBUILD_MODNAME)) /* not our port */ return; device = pardev->private; port->ops->disable_irq(port); pps_unregister_source(device->pps); parport_release(pardev); parport_unregister_device(pardev); kfree(device); } static struct parport_driver pps_parport_driver = { .name = KBUILD_MODNAME, .attach = parport_attach, .detach = parport_detach, }; /* module staff */ static int __init pps_parport_init(void) { int ret; pr_info(DRVDESC "\n"); if (clear_wait > CLEAR_WAIT_MAX) { pr_err("clear_wait value should be not greater" " then %d\n", CLEAR_WAIT_MAX); return -EINVAL; } ret = parport_register_driver(&pps_parport_driver); if (ret) { pr_err("unable to register with parport\n"); return ret; } return 0; } static void __exit pps_parport_exit(void) { parport_unregister_driver(&pps_parport_driver); } module_init(pps_parport_init); module_exit(pps_parport_exit); MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>"); MODULE_DESCRIPTION(DRVDESC); MODULE_LICENSE("GPL");
gpl-2.0
ignacio28/android_kernel_lge_msm8610-2
net/sunrpc/xprtrdma/rpc_rdma.c
5088
28365
/* * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the BSD-type * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Network Appliance, Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * rpc_rdma.c * * This file contains the guts of the RPC RDMA protocol, and * does marshaling/unmarshaling, etc. 
It is also where interfacing * to the Linux RPC framework lives. */ #include "xprt_rdma.h" #include <linux/highmem.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_TRANS #endif enum rpcrdma_chunktype { rpcrdma_noch = 0, rpcrdma_readch, rpcrdma_areadch, rpcrdma_writech, rpcrdma_replych }; #ifdef RPC_DEBUG static const char transfertypes[][12] = { "pure inline", /* no chunks */ " read chunk", /* some argument via rdma read */ "*read chunk", /* entire request via rdma read */ "write chunk", /* some result via rdma write */ "reply chunk" /* entire reply via rdma write */ }; #endif /* * Chunk assembly from upper layer xdr_buf. * * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk * elements. Segments are then coalesced when registered, if possible * within the selected memreg mode. * * Note, this routine is never called if the connection's memory * registration strategy is 0 (bounce buffers). */ static int rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos, enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs) { int len, n = 0, p; int page_base; struct page **ppages; if (pos == 0 && xdrbuf->head[0].iov_len) { seg[n].mr_page = NULL; seg[n].mr_offset = xdrbuf->head[0].iov_base; seg[n].mr_len = xdrbuf->head[0].iov_len; ++n; } len = xdrbuf->page_len; ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT); page_base = xdrbuf->page_base & ~PAGE_MASK; p = 0; while (len && n < nsegs) { seg[n].mr_page = ppages[p]; seg[n].mr_offset = (void *)(unsigned long) page_base; seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len); BUG_ON(seg[n].mr_len > PAGE_SIZE); len -= seg[n].mr_len; ++n; ++p; page_base = 0; /* page offset only applies to first page */ } /* Message overflows the seg array */ if (len && n == nsegs) return 0; if (xdrbuf->tail[0].iov_len) { /* the rpcrdma protocol allows us to omit any trailing * xdr pad bytes, saving the server an RDMA operation. 
*/ if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) return n; if (n == nsegs) /* Tail remains, but we're out of segments */ return 0; seg[n].mr_page = NULL; seg[n].mr_offset = xdrbuf->tail[0].iov_base; seg[n].mr_len = xdrbuf->tail[0].iov_len; ++n; } return n; } /* * Create read/write chunk lists, and reply chunks, for RDMA * * Assume check against THRESHOLD has been done, and chunks are required. * Assume only encoding one list entry for read|write chunks. The NFSv3 * protocol is simple enough to allow this as it only has a single "bulk * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.) * * When used for a single reply chunk (which is a special write * chunk used for the entire reply, rather than just the data), it * is used primarily for READDIR and READLINK which would otherwise * be severely size-limited by a small rdma inline read max. The server * response will come back as an RDMA Write, followed by a message * of type RDMA_NOMSG carrying the xid and length. As a result, reply * chunks do not provide data alignment, however they do not require * "fixup" (moving the response to the upper layer buffer) either. * * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64): * * Read chunklist (a linked list): * N elements, position P (same P for all chunks of same arg!): * 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0 * * Write chunklist (a list of (one) counted array): * N elements: * 1 - N - HLOO - HLOO - ... - HLOO - 0 * * Reply chunk (a counted array): * N elements: * 1 - N - HLOO - HLOO - ... 
- HLOO */ static unsigned int rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type) { struct rpcrdma_req *req = rpcr_to_rdmar(rqst); struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt); int nsegs, nchunks = 0; unsigned int pos; struct rpcrdma_mr_seg *seg = req->rl_segments; struct rpcrdma_read_chunk *cur_rchunk = NULL; struct rpcrdma_write_array *warray = NULL; struct rpcrdma_write_chunk *cur_wchunk = NULL; __be32 *iptr = headerp->rm_body.rm_chunks; if (type == rpcrdma_readch || type == rpcrdma_areadch) { /* a read chunk - server will RDMA Read our memory */ cur_rchunk = (struct rpcrdma_read_chunk *) iptr; } else { /* a write or reply chunk - server will RDMA Write our memory */ *iptr++ = xdr_zero; /* encode a NULL read chunk list */ if (type == rpcrdma_replych) *iptr++ = xdr_zero; /* a NULL write chunk list */ warray = (struct rpcrdma_write_array *) iptr; cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1); } if (type == rpcrdma_replych || type == rpcrdma_areadch) pos = 0; else pos = target->head[0].iov_len; nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS); if (nsegs == 0) return 0; do { /* bind/register the memory, then build chunk from result. */ int n = rpcrdma_register_external(seg, nsegs, cur_wchunk != NULL, r_xprt); if (n <= 0) goto out; if (cur_rchunk) { /* read */ cur_rchunk->rc_discrim = xdr_one; /* all read chunks have the same "position" */ cur_rchunk->rc_position = htonl(pos); cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); xdr_encode_hyper( (__be32 *)&cur_rchunk->rc_target.rs_offset, seg->mr_base); dprintk("RPC: %s: read chunk " "elem %d@0x%llx:0x%x pos %u (%s)\n", __func__, seg->mr_len, (unsigned long long)seg->mr_base, seg->mr_rkey, pos, n < nsegs ? 
"more" : "last"); cur_rchunk++; r_xprt->rx_stats.read_chunk_count++; } else { /* write/reply */ cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); xdr_encode_hyper( (__be32 *)&cur_wchunk->wc_target.rs_offset, seg->mr_base); dprintk("RPC: %s: %s chunk " "elem %d@0x%llx:0x%x (%s)\n", __func__, (type == rpcrdma_replych) ? "reply" : "write", seg->mr_len, (unsigned long long)seg->mr_base, seg->mr_rkey, n < nsegs ? "more" : "last"); cur_wchunk++; if (type == rpcrdma_replych) r_xprt->rx_stats.reply_chunk_count++; else r_xprt->rx_stats.write_chunk_count++; r_xprt->rx_stats.total_rdma_request += seg->mr_len; } nchunks++; seg += n; nsegs -= n; } while (nsegs); /* success. all failures return above */ req->rl_nchunks = nchunks; BUG_ON(nchunks == 0); BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) && (nchunks > 3)); /* * finish off header. If write, marshal discrim and nchunks. */ if (cur_rchunk) { iptr = (__be32 *) cur_rchunk; *iptr++ = xdr_zero; /* finish the read chunk list */ *iptr++ = xdr_zero; /* encode a NULL write chunk list */ *iptr++ = xdr_zero; /* encode a NULL reply chunk */ } else { warray->wc_discrim = xdr_one; warray->wc_nchunks = htonl(nchunks); iptr = (__be32 *) cur_wchunk; if (type == rpcrdma_writech) { *iptr++ = xdr_zero; /* finish the write chunk list */ *iptr++ = xdr_zero; /* encode a NULL reply chunk */ } } /* * Return header size. */ return (unsigned char *)iptr - (unsigned char *)headerp; out: for (pos = 0; nchunks--;) pos += rpcrdma_deregister_external( &req->rl_segments[pos], r_xprt, NULL); return 0; } /* * Copy write data inline. * This function is used for "small" requests. Data which is passed * to RPC via iovecs (or page list) is copied directly into the * pre-registered memory buffer for this request. For small amounts * of data, this is efficient. The cutoff value is tunable. 
*/ static int rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad) { int i, npages, curlen; int copy_len; unsigned char *srcp, *destp; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt); int page_base; struct page **ppages; destp = rqst->rq_svec[0].iov_base; curlen = rqst->rq_svec[0].iov_len; destp += curlen; /* * Do optional padding where it makes sense. Alignment of write * payload can help the server, if our setting is accurate. */ pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/); if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH) pad = 0; /* don't pad this request */ dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n", __func__, pad, destp, rqst->rq_slen, curlen); copy_len = rqst->rq_snd_buf.page_len; if (rqst->rq_snd_buf.tail[0].iov_len) { curlen = rqst->rq_snd_buf.tail[0].iov_len; if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) { memmove(destp + copy_len, rqst->rq_snd_buf.tail[0].iov_base, curlen); r_xprt->rx_stats.pullup_copy_count += curlen; } dprintk("RPC: %s: tail destp 0x%p len %d\n", __func__, destp + copy_len, curlen); rqst->rq_svec[0].iov_len += curlen; } r_xprt->rx_stats.pullup_copy_count += copy_len; page_base = rqst->rq_snd_buf.page_base; ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT); page_base &= ~PAGE_MASK; npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT; for (i = 0; copy_len && i < npages; i++) { curlen = PAGE_SIZE - page_base; if (curlen > copy_len) curlen = copy_len; dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n", __func__, i, destp, copy_len, curlen); srcp = kmap_atomic(ppages[i]); memcpy(destp, srcp+page_base, curlen); kunmap_atomic(srcp); rqst->rq_svec[0].iov_len += curlen; destp += curlen; copy_len -= curlen; page_base = 0; } /* header now contains entire send message */ return pad; } /* * Marshal a request: the primary job of this routine is to choose * the transfer modes. See comments below. 
* * Uses multiple RDMA IOVs for a request: * [0] -- RPC RDMA header, which uses memory from the *start* of the * preregistered buffer that already holds the RPC data in * its middle. * [1] -- the RPC header/data, marshaled by RPC and the NFS protocol. * [2] -- optional padding. * [3] -- if padded, header only in [1] and data here. */ int rpcrdma_marshal_req(struct rpc_rqst *rqst) { struct rpc_xprt *xprt = rqst->rq_task->tk_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); struct rpcrdma_req *req = rpcr_to_rdmar(rqst); char *base; size_t hdrlen, rpclen, padlen; enum rpcrdma_chunktype rtype, wtype; struct rpcrdma_msg *headerp; /* * rpclen gets amount of data in first buffer, which is the * pre-registered buffer. */ base = rqst->rq_svec[0].iov_base; rpclen = rqst->rq_svec[0].iov_len; /* build RDMA header in private area at front */ headerp = (struct rpcrdma_msg *) req->rl_base; /* don't htonl XID, it's already done in request */ headerp->rm_xid = rqst->rq_xid; headerp->rm_vers = xdr_one; headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests); headerp->rm_type = htonl(RDMA_MSG); /* * Chunks needed for results? * * o If the expected result is under the inline threshold, all ops * return as inline (but see later). * o Large non-read ops return as a single reply chunk. * o Large read ops return data as write chunk(s), header as inline. * * Note: the NFS code sending down multiple result segments implies * the op is one of read, readdir[plus], readlink or NFSv4 getacl. */ /* * This code can handle read chunks, write chunks OR reply * chunks -- only one type. If the request is too big to fit * inline, then we will choose read chunks. If the request is * a READ, then use write chunks to separate the file data * into pages; otherwise use reply chunks. 
*/ if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst)) wtype = rpcrdma_noch; else if (rqst->rq_rcv_buf.page_len == 0) wtype = rpcrdma_replych; else if (rqst->rq_rcv_buf.flags & XDRBUF_READ) wtype = rpcrdma_writech; else wtype = rpcrdma_replych; /* * Chunks needed for arguments? * * o If the total request is under the inline threshold, all ops * are sent as inline. * o Large non-write ops are sent with the entire message as a * single read chunk (protocol 0-position special case). * o Large write ops transmit data as read chunk(s), header as * inline. * * Note: the NFS code sending down multiple argument segments * implies the op is a write. * TBD check NFSv4 setacl */ if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) rtype = rpcrdma_noch; else if (rqst->rq_snd_buf.page_len == 0) rtype = rpcrdma_areadch; else rtype = rpcrdma_readch; /* The following simplification is not true forever */ if (rtype != rpcrdma_noch && wtype == rpcrdma_replych) wtype = rpcrdma_noch; BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch); if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS && (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) { /* forced to "pure inline"? */ dprintk("RPC: %s: too much data (%d/%d) for inline\n", __func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len); return -1; } hdrlen = 28; /*sizeof *headerp;*/ padlen = 0; /* * Pull up any extra send data into the preregistered buffer. * When padding is in use and applies to the transfer, insert * it and change the message type. 
*/ if (rtype == rpcrdma_noch) { padlen = rpcrdma_inline_pullup(rqst, RPCRDMA_INLINE_PAD_VALUE(rqst)); if (padlen) { headerp->rm_type = htonl(RDMA_MSGP); headerp->rm_body.rm_padded.rm_align = htonl(RPCRDMA_INLINE_PAD_VALUE(rqst)); headerp->rm_body.rm_padded.rm_thresh = htonl(RPCRDMA_INLINE_PAD_THRESH); headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero; headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero; headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero; hdrlen += 2 * sizeof(u32); /* extra words in padhdr */ BUG_ON(wtype != rpcrdma_noch); } else { headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero; headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero; headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero; /* new length after pullup */ rpclen = rqst->rq_svec[0].iov_len; /* * Currently we try to not actually use read inline. * Reply chunks have the desirable property that * they land, packed, directly in the target buffers * without headers, so they require no fixup. The * additional RDMA Write op sends the same amount * of data, streams on-the-wire and adds no overhead * on receive. Therefore, we request a reply chunk * for non-writes wherever feasible and efficient. */ if (wtype == rpcrdma_noch && r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER) wtype = rpcrdma_replych; } } /* * Marshal chunks. This routine will return the header length * consumed by marshaling. 
*/ if (rtype != rpcrdma_noch) { hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf, headerp, rtype); wtype = rtype; /* simplify dprintk */ } else if (wtype != rpcrdma_noch) { hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf, headerp, wtype); } if (hdrlen == 0) return -1; dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd" " headerp 0x%p base 0x%p lkey 0x%x\n", __func__, transfertypes[wtype], hdrlen, rpclen, padlen, headerp, base, req->rl_iov.lkey); /* * initialize send_iov's - normally only two: rdma chunk header and * single preregistered RPC header buffer, but if padding is present, * then use a preregistered (and zeroed) pad buffer between the RPC * header and any write data. In all non-rdma cases, any following * data has been copied into the RPC header buffer. */ req->rl_send_iov[0].addr = req->rl_iov.addr; req->rl_send_iov[0].length = hdrlen; req->rl_send_iov[0].lkey = req->rl_iov.lkey; req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base); req->rl_send_iov[1].length = rpclen; req->rl_send_iov[1].lkey = req->rl_iov.lkey; req->rl_niovs = 2; if (padlen) { struct rpcrdma_ep *ep = &r_xprt->rx_ep; req->rl_send_iov[2].addr = ep->rep_pad.addr; req->rl_send_iov[2].length = padlen; req->rl_send_iov[2].lkey = ep->rep_pad.lkey; req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen; req->rl_send_iov[3].length = rqst->rq_slen - rpclen; req->rl_send_iov[3].lkey = req->rl_iov.lkey; req->rl_niovs = 4; } return 0; } /* * Chase down a received write or reply chunklist to get length * RDMA'd by server. See map at rpcrdma_create_chunks()! 
:-) */ static int rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp) { unsigned int i, total_len; struct rpcrdma_write_chunk *cur_wchunk; i = ntohl(**iptrp); /* get array count */ if (i > max) return -1; cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1); total_len = 0; while (i--) { struct rpcrdma_segment *seg = &cur_wchunk->wc_target; ifdebug(FACILITY) { u64 off; xdr_decode_hyper((__be32 *)&seg->rs_offset, &off); dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", __func__, ntohl(seg->rs_length), (unsigned long long)off, ntohl(seg->rs_handle)); } total_len += ntohl(seg->rs_length); ++cur_wchunk; } /* check and adjust for properly terminated write chunk */ if (wrchunk) { __be32 *w = (__be32 *) cur_wchunk; if (*w++ != xdr_zero) return -1; cur_wchunk = (struct rpcrdma_write_chunk *) w; } if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) return -1; *iptrp = (__be32 *) cur_wchunk; return total_len; } /* * Scatter inline received data back into provided iov's. 
*/ static void rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad) { int i, npages, curlen, olen; char *destp; struct page **ppages; int page_base; curlen = rqst->rq_rcv_buf.head[0].iov_len; if (curlen > copy_len) { /* write chunk header fixup */ curlen = copy_len; rqst->rq_rcv_buf.head[0].iov_len = curlen; } dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n", __func__, srcp, copy_len, curlen); /* Shift pointer for first receive segment only */ rqst->rq_rcv_buf.head[0].iov_base = srcp; srcp += curlen; copy_len -= curlen; olen = copy_len; i = 0; rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen; page_base = rqst->rq_rcv_buf.page_base; ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT); page_base &= ~PAGE_MASK; if (copy_len && rqst->rq_rcv_buf.page_len) { npages = PAGE_ALIGN(page_base + rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT; for (; i < npages; i++) { curlen = PAGE_SIZE - page_base; if (curlen > copy_len) curlen = copy_len; dprintk("RPC: %s: page %d" " srcp 0x%p len %d curlen %d\n", __func__, i, srcp, copy_len, curlen); destp = kmap_atomic(ppages[i]); memcpy(destp + page_base, srcp, curlen); flush_dcache_page(ppages[i]); kunmap_atomic(destp); srcp += curlen; copy_len -= curlen; if (copy_len == 0) break; page_base = 0; } rqst->rq_rcv_buf.page_len = olen - copy_len; } else rqst->rq_rcv_buf.page_len = 0; if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) { curlen = copy_len; if (curlen > rqst->rq_rcv_buf.tail[0].iov_len) curlen = rqst->rq_rcv_buf.tail[0].iov_len; if (rqst->rq_rcv_buf.tail[0].iov_base != srcp) memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen); dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n", __func__, srcp, copy_len, curlen); rqst->rq_rcv_buf.tail[0].iov_len = curlen; copy_len -= curlen; ++i; } else rqst->rq_rcv_buf.tail[0].iov_len = 0; if (pad) { /* implicit padding on terminal chunk */ unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base; while (pad--) p[rqst->rq_rcv_buf.tail[0].iov_len++] = 
0; } if (copy_len) dprintk("RPC: %s: %d bytes in" " %d extra segments (%d lost)\n", __func__, olen, i, copy_len); /* TBD avoid a warning from call_decode() */ rqst->rq_private_buf = rqst->rq_rcv_buf; } /* * This function is called when an async event is posted to * the connection which changes the connection state. All it * does at this point is mark the connection up/down, the rpc * timers do the rest. */ void rpcrdma_conn_func(struct rpcrdma_ep *ep) { struct rpc_xprt *xprt = ep->rep_xprt; spin_lock_bh(&xprt->transport_lock); if (++xprt->connect_cookie == 0) /* maintain a reserved value */ ++xprt->connect_cookie; if (ep->rep_connected > 0) { if (!xprt_test_and_set_connected(xprt)) xprt_wake_pending_tasks(xprt, 0); } else { if (xprt_test_and_clear_connected(xprt)) xprt_wake_pending_tasks(xprt, -ENOTCONN); } spin_unlock_bh(&xprt->transport_lock); } /* * This function is called when memory window unbind which we are waiting * for completes. Just use rr_func (zeroed by upcall) to signal completion. */ static void rpcrdma_unbind_func(struct rpcrdma_rep *rep) { wake_up(&rep->rr_unbind); } /* * Called as a tasklet to do req/reply match and complete a request * Errors must result in the RPC task either being awakened, or * allowed to timeout, to discover the errors at that time. */ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) { struct rpcrdma_msg *headerp; struct rpcrdma_req *req; struct rpc_rqst *rqst; struct rpc_xprt *xprt = rep->rr_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); __be32 *iptr; int i, rdmalen, status; /* Check status. 
If bad, signal disconnect and return rep to pool */ if (rep->rr_len == ~0U) { rpcrdma_recv_buffer_put(rep); if (r_xprt->rx_ep.rep_connected == 1) { r_xprt->rx_ep.rep_connected = -EIO; rpcrdma_conn_func(&r_xprt->rx_ep); } return; } if (rep->rr_len < 28) { dprintk("RPC: %s: short/invalid reply\n", __func__); goto repost; } headerp = (struct rpcrdma_msg *) rep->rr_base; if (headerp->rm_vers != xdr_one) { dprintk("RPC: %s: invalid version %d\n", __func__, ntohl(headerp->rm_vers)); goto repost; } /* Get XID and try for a match. */ spin_lock(&xprt->transport_lock); rqst = xprt_lookup_rqst(xprt, headerp->rm_xid); if (rqst == NULL) { spin_unlock(&xprt->transport_lock); dprintk("RPC: %s: reply 0x%p failed " "to match any request xid 0x%08x len %d\n", __func__, rep, headerp->rm_xid, rep->rr_len); repost: r_xprt->rx_stats.bad_reply_count++; rep->rr_func = rpcrdma_reply_handler; if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep)) rpcrdma_recv_buffer_put(rep); return; } /* get request object */ req = rpcr_to_rdmar(rqst); if (req->rl_reply) { spin_unlock(&xprt->transport_lock); dprintk("RPC: %s: duplicate reply 0x%p to RPC " "request 0x%p: xid 0x%08x\n", __func__, rep, req, headerp->rm_xid); goto repost; } dprintk("RPC: %s: reply 0x%p completes request 0x%p\n" " RPC request 0x%p xid 0x%08x\n", __func__, rep, req, rqst, headerp->rm_xid); /* from here on, the reply is no longer an orphan */ req->rl_reply = rep; /* check for expected message types */ /* The order of some of these tests is important. 
*/ switch (headerp->rm_type) { case htonl(RDMA_MSG): /* never expect read chunks */ /* never expect reply chunks (two ways to check) */ /* never expect write chunks without having offered RDMA */ if (headerp->rm_body.rm_chunks[0] != xdr_zero || (headerp->rm_body.rm_chunks[1] == xdr_zero && headerp->rm_body.rm_chunks[2] != xdr_zero) || (headerp->rm_body.rm_chunks[1] != xdr_zero && req->rl_nchunks == 0)) goto badheader; if (headerp->rm_body.rm_chunks[1] != xdr_zero) { /* count any expected write chunks in read reply */ /* start at write chunk array count */ iptr = &headerp->rm_body.rm_chunks[2]; rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 1, &iptr); /* check for validity, and no reply chunk after */ if (rdmalen < 0 || *iptr++ != xdr_zero) goto badheader; rep->rr_len -= ((unsigned char *)iptr - (unsigned char *)headerp); status = rep->rr_len + rdmalen; r_xprt->rx_stats.total_rdma_reply += rdmalen; /* special case - last chunk may omit padding */ if (rdmalen &= 3) { rdmalen = 4 - rdmalen; status += rdmalen; } } else { /* else ordinary inline */ rdmalen = 0; iptr = (__be32 *)((unsigned char *)headerp + 28); rep->rr_len -= 28; /*sizeof *headerp;*/ status = rep->rr_len; } /* Fix up the rpc results for upper layer */ rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen); break; case htonl(RDMA_NOMSG): /* never expect read or write chunks, always reply chunks */ if (headerp->rm_body.rm_chunks[0] != xdr_zero || headerp->rm_body.rm_chunks[1] != xdr_zero || headerp->rm_body.rm_chunks[2] != xdr_one || req->rl_nchunks == 0) goto badheader; iptr = (__be32 *)((unsigned char *)headerp + 28); rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); if (rdmalen < 0) goto badheader; r_xprt->rx_stats.total_rdma_reply += rdmalen; /* Reply chunk buffer already is the reply vector - no fixup. 
*/ status = rdmalen; break; badheader: default: dprintk("%s: invalid rpcrdma reply header (type %d):" " chunks[012] == %d %d %d" " expected chunks <= %d\n", __func__, ntohl(headerp->rm_type), headerp->rm_body.rm_chunks[0], headerp->rm_body.rm_chunks[1], headerp->rm_body.rm_chunks[2], req->rl_nchunks); status = -EIO; r_xprt->rx_stats.bad_reply_count++; break; } /* If using mw bind, start the deregister process now. */ /* (Note: if mr_free(), cannot perform it here, in tasklet context) */ if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) { case RPCRDMA_MEMWINDOWS: for (i = 0; req->rl_nchunks-- > 1;) i += rpcrdma_deregister_external( &req->rl_segments[i], r_xprt, NULL); /* Optionally wait (not here) for unbinds to complete */ rep->rr_func = rpcrdma_unbind_func; (void) rpcrdma_deregister_external(&req->rl_segments[i], r_xprt, rep); break; case RPCRDMA_MEMWINDOWS_ASYNC: for (i = 0; req->rl_nchunks--;) i += rpcrdma_deregister_external(&req->rl_segments[i], r_xprt, NULL); break; default: break; } dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n", __func__, xprt, rqst, status); xprt_complete_rqst(rqst->rq_task, status); spin_unlock(&xprt->transport_lock); }
gpl-2.0
ShinySide/HispAsian_S5
fs/nfsd/nfsxdr.c
8160
13276
/* * XDR support for nfsd * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include "xdr.h" #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_XDR /* * Mapping of S_IF* types to NFS file types */ static u32 nfs_ftypes[] = { NFNON, NFCHR, NFCHR, NFBAD, NFDIR, NFBAD, NFBLK, NFBAD, NFREG, NFBAD, NFLNK, NFBAD, NFSOCK, NFBAD, NFLNK, NFBAD, }; /* * XDR functions for basic NFS types */ static __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { fh_init(fhp, NFS_FHSIZE); memcpy(&fhp->fh_handle.fh_base, p, NFS_FHSIZE); fhp->fh_handle.fh_size = NFS_FHSIZE; /* FIXME: Look up export pointer here and verify * Sun Secure RPC if requested */ return p + (NFS_FHSIZE >> 2); } /* Helper function for NFSv2 ACL code */ __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp) { return decode_fh(p, fhp); } static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE); return p + (NFS_FHSIZE>> 2); } /* * Decode a file name and make sure that the path contains * no slashes or null bytes. */ static __be32 * decode_filename(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXNAMLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0' || *name == '/') return NULL; } } return p; } static __be32 * decode_pathname(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXPATHLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0') return NULL; } } return p; } static __be32 * decode_sattr(__be32 *p, struct iattr *iap) { u32 tmp, tmp1; iap->ia_valid = 0; /* Sun client bug compatibility check: some sun clients seem to * put 0xffff in the mode field when they mean 0xffffffff. * Quoting the 4.4BSD nfs server code: Nah nah nah nah na nah. 
*/ if ((tmp = ntohl(*p++)) != (u32)-1 && tmp != 0xffff) { iap->ia_valid |= ATTR_MODE; iap->ia_mode = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_UID; iap->ia_uid = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_GID; iap->ia_gid = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_SIZE; iap->ia_size = tmp; } tmp = ntohl(*p++); tmp1 = ntohl(*p++); if (tmp != (u32)-1 && tmp1 != (u32)-1) { iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; iap->ia_atime.tv_sec = tmp; iap->ia_atime.tv_nsec = tmp1 * 1000; } tmp = ntohl(*p++); tmp1 = ntohl(*p++); if (tmp != (u32)-1 && tmp1 != (u32)-1) { iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET; iap->ia_mtime.tv_sec = tmp; iap->ia_mtime.tv_nsec = tmp1 * 1000; /* * Passing the invalid value useconds=1000000 for mtime * is a Sun convention for "set both mtime and atime to * current server time". It's needed to make permissions * checks for the "touch" program across v2 mounts to * Solaris and Irix boxes work correctly. 
See description of * sattr in section 6.1 of "NFS Illustrated" by * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5 */ if (tmp1 == 1000000) iap->ia_valid &= ~(ATTR_ATIME_SET|ATTR_MTIME_SET); } return p; } static __be32 * encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { struct dentry *dentry = fhp->fh_dentry; int type; struct timespec time; u32 f; type = (stat->mode & S_IFMT); *p++ = htonl(nfs_ftypes[type >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid)); *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid)); if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) { *p++ = htonl(NFS_MAXPATHLEN); } else { *p++ = htonl((u32) stat->size); } *p++ = htonl((u32) stat->blksize); if (S_ISCHR(type) || S_ISBLK(type)) *p++ = htonl(new_encode_dev(stat->rdev)); else *p++ = htonl(0xffffffff); *p++ = htonl((u32) stat->blocks); switch (fsid_source(fhp)) { default: case FSIDSOURCE_DEV: *p++ = htonl(new_encode_dev(stat->dev)); break; case FSIDSOURCE_FSID: *p++ = htonl((u32) fhp->fh_export->ex_fsid); break; case FSIDSOURCE_UUID: f = ((u32*)fhp->fh_export->ex_uuid)[0]; f ^= ((u32*)fhp->fh_export->ex_uuid)[1]; f ^= ((u32*)fhp->fh_export->ex_uuid)[2]; f ^= ((u32*)fhp->fh_export->ex_uuid)[3]; *p++ = htonl(f); break; } *p++ = htonl((u32) stat->ino); *p++ = htonl((u32) stat->atime.tv_sec); *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0); lease_get_mtime(dentry->d_inode, &time); *p++ = htonl((u32) time.tv_sec); *p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); *p++ = htonl((u32) stat->ctime.tv_sec); *p++ = htonl(stat->ctime.tv_nsec ? 
stat->ctime.tv_nsec / 1000 : 0); return p; } /* Helper function for NFSv2 ACL code */ __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct kstat stat; vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &stat); return encode_fattr(rqstp, p, fhp, &stat); } /* * XDR decode functions */ int nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_argsize_check(rqstp, p); } int nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args) { if (!(p = decode_fh(p, &args->fh))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_sattrargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_diropargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readargs *args) { unsigned int len; int v,pn; if (!(p = decode_fh(p, &args->fh))) return 0; args->offset = ntohl(*p++); len = args->count = ntohl(*p++); p++; /* totalcount - unused */ if (len > NFSSVC_MAXBLKSIZE_V2) len = NFSSVC_MAXBLKSIZE_V2; /* set up somewhere to store response. 
* We take pages, put them on reslist and include in iovec */ v=0; while (len > 0) { pn = rqstp->rq_resused++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]); rqstp->rq_vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE; len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_writeargs *args) { unsigned int len, hdr, dlen; int v; if (!(p = decode_fh(p, &args->fh))) return 0; p++; /* beginoffset */ args->offset = ntohl(*p++); /* offset */ p++; /* totalcount */ len = args->len = ntohl(*p++); /* * The protocol specifies a maximum of 8192 bytes. */ if (len > NFSSVC_MAXBLKSIZE_V2) return 0; /* * Check to make sure that we got the right number of * bytes. */ hdr = (void*)p - rqstp->rq_arg.head[0].iov_base; dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len - hdr; /* * Round the length of the data which was specified up to * the next multiple of XDR units and then compare that * against the length which was actually received. * Note that when RPCSEC/GSS (for example) is used, the * data buffer can be padded so dlen might be larger * than required. It must never be smaller. 
*/ if (dlen < XDR_QUADLEN(len)*4) return 0; rqstp->rq_vec[0].iov_base = (void*)p; rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; v = 0; while (len > rqstp->rq_vec[v].iov_len) { len -= rqstp->rq_vec[v].iov_len; v++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]); rqstp->rq_vec[v].iov_len = PAGE_SIZE; } rqstp->rq_vec[v].iov_len = len; args->vlen = v + 1; return 1; } int nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_createargs *args) { if ( !(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_renameargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_linkargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_symlinkargs *args) { if ( !(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_pathname(p, &args->tname, &args->tlen))) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readdirargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; 
args->cookie = ntohl(*p++); args->count = ntohl(*p++); if (args->count > PAGE_SIZE) args->count = PAGE_SIZE; args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]); return xdr_argsize_check(rqstp, p); } /* * XDR encode functions */ int nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } int nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd_attrstat *resp) { p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_diropres *resp) { p = encode_fh(p, &resp->fh); p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkres *resp) { *p++ = htonl(resp->len); xdr_ressize_check(rqstp, p); rqstp->rq_res.page_len = resp->len; if (resp->len & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); } return 1; } int nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readres *resp) { p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); *p++ = htonl(resp->count); xdr_ressize_check(rqstp, p); /* now update rqstp->rq_res to reflect data as well */ rqstp->rq_res.page_len = resp->count; if (resp->count & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3); } return 1; } int nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readdirres *resp) { xdr_ressize_check(rqstp, p); p = resp->buffer; *p++ = 0; /* no more entries */ *p++ = htonl((resp->common.err == nfserr_eof)); rqstp->rq_res.page_len = (((unsigned long)p-1) & ~PAGE_MASK)+1; return 1; } int nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_statfsres *resp) { struct kstatfs *stat = &resp->stats; *p++ = 
htonl(NFSSVC_MAXBLKSIZE_V2); /* max transfer size */ *p++ = htonl(stat->f_bsize); *p++ = htonl(stat->f_blocks); *p++ = htonl(stat->f_bfree); *p++ = htonl(stat->f_bavail); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_entry(void *ccdv, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct readdir_cd *ccd = ccdv; struct nfsd_readdirres *cd = container_of(ccd, struct nfsd_readdirres, common); __be32 *p = cd->buffer; int buflen, slen; /* dprintk("nfsd: entry(%.*s off %ld ino %ld)\n", namlen, name, offset, ino); */ if (offset > ~((u32) 0)) { cd->common.err = nfserr_fbig; return -EINVAL; } if (cd->offset) *cd->offset = htonl(offset); if (namlen > NFS2_MAXNAMLEN) namlen = NFS2_MAXNAMLEN;/* truncate filename */ slen = XDR_QUADLEN(namlen); if ((buflen = cd->buflen - slen - 4) < 0) { cd->common.err = nfserr_toosmall; return -EINVAL; } if (ino > ~((u32) 0)) { cd->common.err = nfserr_fbig; return -EINVAL; } *p++ = xdr_one; /* mark entry present */ *p++ = htonl((u32) ino); /* file id */ p = xdr_encode_array(p, name, namlen);/* name length & name */ cd->offset = p; /* remember pointer */ *p++ = htonl(~0U); /* offset of next entry */ cd->buflen = buflen; cd->buffer = p; cd->common.err = nfs_ok; return 0; } /* * XDR release functions */ int nfssvc_release_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *resp) { fh_put(&resp->fh); return 1; }
gpl-2.0
kraatus90/MotoX_Kernel
fs/nfsd/nfsxdr.c
8160
13276
/* * XDR support for nfsd * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include "xdr.h" #include "auth.h" #define NFSDDBG_FACILITY NFSDDBG_XDR /* * Mapping of S_IF* types to NFS file types */ static u32 nfs_ftypes[] = { NFNON, NFCHR, NFCHR, NFBAD, NFDIR, NFBAD, NFBLK, NFBAD, NFREG, NFBAD, NFLNK, NFBAD, NFSOCK, NFBAD, NFLNK, NFBAD, }; /* * XDR functions for basic NFS types */ static __be32 * decode_fh(__be32 *p, struct svc_fh *fhp) { fh_init(fhp, NFS_FHSIZE); memcpy(&fhp->fh_handle.fh_base, p, NFS_FHSIZE); fhp->fh_handle.fh_size = NFS_FHSIZE; /* FIXME: Look up export pointer here and verify * Sun Secure RPC if requested */ return p + (NFS_FHSIZE >> 2); } /* Helper function for NFSv2 ACL code */ __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp) { return decode_fh(p, fhp); } static __be32 * encode_fh(__be32 *p, struct svc_fh *fhp) { memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE); return p + (NFS_FHSIZE>> 2); } /* * Decode a file name and make sure that the path contains * no slashes or null bytes. */ static __be32 * decode_filename(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXNAMLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0' || *name == '/') return NULL; } } return p; } static __be32 * decode_pathname(__be32 *p, char **namp, unsigned int *lenp) { char *name; unsigned int i; if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS_MAXPATHLEN)) != NULL) { for (i = 0, name = *namp; i < *lenp; i++, name++) { if (*name == '\0') return NULL; } } return p; } static __be32 * decode_sattr(__be32 *p, struct iattr *iap) { u32 tmp, tmp1; iap->ia_valid = 0; /* Sun client bug compatibility check: some sun clients seem to * put 0xffff in the mode field when they mean 0xffffffff. * Quoting the 4.4BSD nfs server code: Nah nah nah nah na nah. 
*/ if ((tmp = ntohl(*p++)) != (u32)-1 && tmp != 0xffff) { iap->ia_valid |= ATTR_MODE; iap->ia_mode = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_UID; iap->ia_uid = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_GID; iap->ia_gid = tmp; } if ((tmp = ntohl(*p++)) != (u32)-1) { iap->ia_valid |= ATTR_SIZE; iap->ia_size = tmp; } tmp = ntohl(*p++); tmp1 = ntohl(*p++); if (tmp != (u32)-1 && tmp1 != (u32)-1) { iap->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; iap->ia_atime.tv_sec = tmp; iap->ia_atime.tv_nsec = tmp1 * 1000; } tmp = ntohl(*p++); tmp1 = ntohl(*p++); if (tmp != (u32)-1 && tmp1 != (u32)-1) { iap->ia_valid |= ATTR_MTIME | ATTR_MTIME_SET; iap->ia_mtime.tv_sec = tmp; iap->ia_mtime.tv_nsec = tmp1 * 1000; /* * Passing the invalid value useconds=1000000 for mtime * is a Sun convention for "set both mtime and atime to * current server time". It's needed to make permissions * checks for the "touch" program across v2 mounts to * Solaris and Irix boxes work correctly. 
See description of * sattr in section 6.1 of "NFS Illustrated" by * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5 */ if (tmp1 == 1000000) iap->ia_valid &= ~(ATTR_ATIME_SET|ATTR_MTIME_SET); } return p; } static __be32 * encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat) { struct dentry *dentry = fhp->fh_dentry; int type; struct timespec time; u32 f; type = (stat->mode & S_IFMT); *p++ = htonl(nfs_ftypes[type >> 12]); *p++ = htonl((u32) stat->mode); *p++ = htonl((u32) stat->nlink); *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid)); *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid)); if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) { *p++ = htonl(NFS_MAXPATHLEN); } else { *p++ = htonl((u32) stat->size); } *p++ = htonl((u32) stat->blksize); if (S_ISCHR(type) || S_ISBLK(type)) *p++ = htonl(new_encode_dev(stat->rdev)); else *p++ = htonl(0xffffffff); *p++ = htonl((u32) stat->blocks); switch (fsid_source(fhp)) { default: case FSIDSOURCE_DEV: *p++ = htonl(new_encode_dev(stat->dev)); break; case FSIDSOURCE_FSID: *p++ = htonl((u32) fhp->fh_export->ex_fsid); break; case FSIDSOURCE_UUID: f = ((u32*)fhp->fh_export->ex_uuid)[0]; f ^= ((u32*)fhp->fh_export->ex_uuid)[1]; f ^= ((u32*)fhp->fh_export->ex_uuid)[2]; f ^= ((u32*)fhp->fh_export->ex_uuid)[3]; *p++ = htonl(f); break; } *p++ = htonl((u32) stat->ino); *p++ = htonl((u32) stat->atime.tv_sec); *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0); lease_get_mtime(dentry->d_inode, &time); *p++ = htonl((u32) time.tv_sec); *p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); *p++ = htonl((u32) stat->ctime.tv_sec); *p++ = htonl(stat->ctime.tv_nsec ? 
stat->ctime.tv_nsec / 1000 : 0); return p; } /* Helper function for NFSv2 ACL code */ __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) { struct kstat stat; vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &stat); return encode_fattr(rqstp, p, fhp, &stat); } /* * XDR decode functions */ int nfssvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_argsize_check(rqstp, p); } int nfssvc_decode_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *args) { if (!(p = decode_fh(p, &args->fh))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_sattrargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_sattrargs *args) { p = decode_fh(p, &args->fh); if (!p) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_diropargs *args) { if (!(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readargs *args) { unsigned int len; int v,pn; if (!(p = decode_fh(p, &args->fh))) return 0; args->offset = ntohl(*p++); len = args->count = ntohl(*p++); p++; /* totalcount - unused */ if (len > NFSSVC_MAXBLKSIZE_V2) len = NFSSVC_MAXBLKSIZE_V2; /* set up somewhere to store response. 
* We take pages, put them on reslist and include in iovec */ v=0; while (len > 0) { pn = rqstp->rq_resused++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]); rqstp->rq_vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE; len -= rqstp->rq_vec[v].iov_len; v++; } args->vlen = v; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_writeargs *args) { unsigned int len, hdr, dlen; int v; if (!(p = decode_fh(p, &args->fh))) return 0; p++; /* beginoffset */ args->offset = ntohl(*p++); /* offset */ p++; /* totalcount */ len = args->len = ntohl(*p++); /* * The protocol specifies a maximum of 8192 bytes. */ if (len > NFSSVC_MAXBLKSIZE_V2) return 0; /* * Check to make sure that we got the right number of * bytes. */ hdr = (void*)p - rqstp->rq_arg.head[0].iov_base; dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len - hdr; /* * Round the length of the data which was specified up to * the next multiple of XDR units and then compare that * against the length which was actually received. * Note that when RPCSEC/GSS (for example) is used, the * data buffer can be padded so dlen might be larger * than required. It must never be smaller. 
*/ if (dlen < XDR_QUADLEN(len)*4) return 0; rqstp->rq_vec[0].iov_base = (void*)p; rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; v = 0; while (len > rqstp->rq_vec[v].iov_len) { len -= rqstp->rq_vec[v].iov_len; v++; rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]); rqstp->rq_vec[v].iov_len = PAGE_SIZE; } rqstp->rq_vec[v].iov_len = len; args->vlen = v + 1; return 1; } int nfssvc_decode_createargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_createargs *args) { if ( !(p = decode_fh(p, &args->fh)) || !(p = decode_filename(p, &args->name, &args->len))) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_renameargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_renameargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_linkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_linkargs *args) { if (!(p = decode_fh(p, &args->ffh)) || !(p = decode_fh(p, &args->tfh)) || !(p = decode_filename(p, &args->tname, &args->tlen))) return 0; return xdr_argsize_check(rqstp, p); } int nfssvc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_symlinkargs *args) { if ( !(p = decode_fh(p, &args->ffh)) || !(p = decode_filename(p, &args->fname, &args->flen)) || !(p = decode_pathname(p, &args->tname, &args->tlen))) return 0; p = decode_sattr(p, &args->attrs); return xdr_argsize_check(rqstp, p); } int nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readdirargs *args) { if (!(p = decode_fh(p, &args->fh))) return 0; 
args->cookie = ntohl(*p++); args->count = ntohl(*p++); if (args->count > PAGE_SIZE) args->count = PAGE_SIZE; args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]); return xdr_argsize_check(rqstp, p); } /* * XDR encode functions */ int nfssvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } int nfssvc_encode_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd_attrstat *resp) { p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_diropres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_diropres *resp) { p = encode_fh(p, &resp->fh); p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_readlinkres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readlinkres *resp) { *p++ = htonl(resp->len); xdr_ressize_check(rqstp, p); rqstp->rq_res.page_len = resp->len; if (resp->len & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); } return 1; } int nfssvc_encode_readres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readres *resp) { p = encode_fattr(rqstp, p, &resp->fh, &resp->stat); *p++ = htonl(resp->count); xdr_ressize_check(rqstp, p); /* now update rqstp->rq_res to reflect data as well */ rqstp->rq_res.page_len = resp->count; if (resp->count & 3) { /* need to pad the tail */ rqstp->rq_res.tail[0].iov_base = p; *p = 0; rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3); } return 1; } int nfssvc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readdirres *resp) { xdr_ressize_check(rqstp, p); p = resp->buffer; *p++ = 0; /* no more entries */ *p++ = htonl((resp->common.err == nfserr_eof)); rqstp->rq_res.page_len = (((unsigned long)p-1) & ~PAGE_MASK)+1; return 1; } int nfssvc_encode_statfsres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_statfsres *resp) { struct kstatfs *stat = &resp->stats; *p++ = 
htonl(NFSSVC_MAXBLKSIZE_V2); /* max transfer size */ *p++ = htonl(stat->f_bsize); *p++ = htonl(stat->f_blocks); *p++ = htonl(stat->f_bfree); *p++ = htonl(stat->f_bavail); return xdr_ressize_check(rqstp, p); } int nfssvc_encode_entry(void *ccdv, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct readdir_cd *ccd = ccdv; struct nfsd_readdirres *cd = container_of(ccd, struct nfsd_readdirres, common); __be32 *p = cd->buffer; int buflen, slen; /* dprintk("nfsd: entry(%.*s off %ld ino %ld)\n", namlen, name, offset, ino); */ if (offset > ~((u32) 0)) { cd->common.err = nfserr_fbig; return -EINVAL; } if (cd->offset) *cd->offset = htonl(offset); if (namlen > NFS2_MAXNAMLEN) namlen = NFS2_MAXNAMLEN;/* truncate filename */ slen = XDR_QUADLEN(namlen); if ((buflen = cd->buflen - slen - 4) < 0) { cd->common.err = nfserr_toosmall; return -EINVAL; } if (ino > ~((u32) 0)) { cd->common.err = nfserr_fbig; return -EINVAL; } *p++ = xdr_one; /* mark entry present */ *p++ = htonl((u32) ino); /* file id */ p = xdr_encode_array(p, name, namlen);/* name length & name */ cd->offset = p; /* remember pointer */ *p++ = htonl(~0U); /* offset of next entry */ cd->buflen = buflen; cd->buffer = p; cd->common.err = nfs_ok; return 0; } /* * XDR release functions */ int nfssvc_release_fhandle(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *resp) { fh_put(&resp->fh); return 1; }
gpl-2.0
aeroevan/vivo_w-kernel
drivers/media/video/pvrusb2/pvrusb2-encoder.c
9440
14688
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/device.h> // for linux/firmware.h #include <linux/firmware.h> #include "pvrusb2-util.h" #include "pvrusb2-encoder.h" #include "pvrusb2-hdw-internal.h" #include "pvrusb2-debug.h" #include "pvrusb2-fx2-cmd.h" /* Firmware mailbox flags - definitions found from ivtv */ #define IVTV_MBOX_FIRMWARE_DONE 0x00000004 #define IVTV_MBOX_DRIVER_DONE 0x00000002 #define IVTV_MBOX_DRIVER_BUSY 0x00000001 #define MBOX_BASE 0x44 static int pvr2_encoder_write_words(struct pvr2_hdw *hdw, unsigned int offs, const u32 *data, unsigned int dlen) { unsigned int idx,addr; unsigned int bAddr; int ret; unsigned int chunkCnt; /* Format: First byte must be 0x01. Remaining 32 bit words are spread out into chunks of 7 bytes each, with the first 4 bytes being the data word (little endian), and the next 3 bytes being the address where that data word is to be written (big endian). Repeat request for additional words, with offset adjusted accordingly. 
*/ while (dlen) { chunkCnt = 8; if (chunkCnt > dlen) chunkCnt = dlen; memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer)); bAddr = 0; hdw->cmd_buffer[bAddr++] = FX2CMD_MEM_WRITE_DWORD; for (idx = 0; idx < chunkCnt; idx++) { addr = idx + offs; hdw->cmd_buffer[bAddr+6] = (addr & 0xffu); hdw->cmd_buffer[bAddr+5] = ((addr>>8) & 0xffu); hdw->cmd_buffer[bAddr+4] = ((addr>>16) & 0xffu); PVR2_DECOMPOSE_LE(hdw->cmd_buffer, bAddr,data[idx]); bAddr += 7; } ret = pvr2_send_request(hdw, hdw->cmd_buffer,1+(chunkCnt*7), NULL,0); if (ret) return ret; data += chunkCnt; dlen -= chunkCnt; offs += chunkCnt; } return 0; } static int pvr2_encoder_read_words(struct pvr2_hdw *hdw, unsigned int offs, u32 *data, unsigned int dlen) { unsigned int idx; int ret; unsigned int chunkCnt; /* Format: First byte must be 0x02 (status check) or 0x28 (read back block of 32 bit words). Next 6 bytes must be zero, followed by a single byte of MBOX_BASE+offset for portion to be read. Returned data is packed set of 32 bits words that were read. */ while (dlen) { chunkCnt = 16; if (chunkCnt > dlen) chunkCnt = dlen; if (chunkCnt < 16) chunkCnt = 1; hdw->cmd_buffer[0] = ((chunkCnt == 1) ? FX2CMD_MEM_READ_DWORD : FX2CMD_MEM_READ_64BYTES); hdw->cmd_buffer[1] = 0; hdw->cmd_buffer[2] = 0; hdw->cmd_buffer[3] = 0; hdw->cmd_buffer[4] = 0; hdw->cmd_buffer[5] = ((offs>>16) & 0xffu); hdw->cmd_buffer[6] = ((offs>>8) & 0xffu); hdw->cmd_buffer[7] = (offs & 0xffu); ret = pvr2_send_request(hdw, hdw->cmd_buffer,8, hdw->cmd_buffer, (chunkCnt == 1 ? 4 : 16 * 4)); if (ret) return ret; for (idx = 0; idx < chunkCnt; idx++) { data[idx] = PVR2_COMPOSE_LE(hdw->cmd_buffer,idx*4); } data += chunkCnt; dlen -= chunkCnt; offs += chunkCnt; } return 0; } /* This prototype is set up to be compatible with the cx2341x_mbox_func prototype in cx2341x.h, which should be in kernels 2.6.18 or later. We do this so that we can enable cx2341x.ko to write to our encoder (by handing it a pointer to this function). 
For earlier kernels this doesn't really matter. */ static int pvr2_encoder_cmd(void *ctxt, u32 cmd, int arg_cnt_send, int arg_cnt_recv, u32 *argp) { unsigned int poll_count; unsigned int try_count = 0; int retry_flag; int ret = 0; unsigned int idx; /* These sizes look to be limited by the FX2 firmware implementation */ u32 wrData[16]; u32 rdData[16]; struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt; /* The encoder seems to speak entirely using blocks 32 bit words. In ivtv driver terms, this is a mailbox at MBOX_BASE which we populate with data and watch what the hardware does with it. The first word is a set of flags used to control the transaction, the second word is the command to execute, the third byte is zero (ivtv driver suggests that this is some kind of return value), and the fourth byte is a specified timeout (windows driver always uses 0x00060000 except for one case when it is zero). All successive words are the argument words for the command. First, write out the entire set of words, with the first word being zero. Next, write out just the first word again, but set it to IVTV_MBOX_DRIVER_DONE | IVTV_DRIVER_BUSY this time (which probably means "go"). Next, read back the return count words. Check the first word, which should have IVTV_MBOX_FIRMWARE_DONE set. If however that bit is not set, then the command isn't done so repeat the read until it is set. Finally, write out just the first word again, but set it to 0x0 this time (which probably means "idle"). 
*/ if (arg_cnt_send > (ARRAY_SIZE(wrData) - 4)) { pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Failed to write cx23416 command" " - too many input arguments" " (was given %u limit %lu)", arg_cnt_send, (long unsigned) ARRAY_SIZE(wrData) - 4); return -EINVAL; } if (arg_cnt_recv > (ARRAY_SIZE(rdData) - 4)) { pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Failed to write cx23416 command" " - too many return arguments" " (was given %u limit %lu)", arg_cnt_recv, (long unsigned) ARRAY_SIZE(rdData) - 4); return -EINVAL; } LOCK_TAKE(hdw->ctl_lock); do { if (!hdw->state_encoder_ok) { ret = -EIO; break; } retry_flag = 0; try_count++; ret = 0; wrData[0] = 0; wrData[1] = cmd; wrData[2] = 0; wrData[3] = 0x00060000; for (idx = 0; idx < arg_cnt_send; idx++) { wrData[idx+4] = argp[idx]; } for (; idx < ARRAY_SIZE(wrData) - 4; idx++) { wrData[idx+4] = 0; } ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,idx); if (ret) break; wrData[0] = IVTV_MBOX_DRIVER_DONE|IVTV_MBOX_DRIVER_BUSY; ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,1); if (ret) break; poll_count = 0; while (1) { poll_count++; ret = pvr2_encoder_read_words(hdw,MBOX_BASE,rdData, arg_cnt_recv+4); if (ret) { break; } if (rdData[0] & IVTV_MBOX_FIRMWARE_DONE) { break; } if (rdData[0] && (poll_count < 1000)) continue; if (!rdData[0]) { retry_flag = !0; pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Encoder timed out waiting for us" "; arranging to retry"); } else { pvr2_trace( PVR2_TRACE_ERROR_LEGS, "***WARNING*** device's encoder" " appears to be stuck" " (status=0x%08x)",rdData[0]); } pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Encoder command: 0x%02x",cmd); for (idx = 4; idx < arg_cnt_send; idx++) { pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Encoder arg%d: 0x%08x", idx-3,wrData[idx]); } ret = -EBUSY; break; } if (retry_flag) { if (try_count < 20) continue; pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Too many retries..."); ret = -EBUSY; } if (ret) { del_timer_sync(&hdw->encoder_run_timer); hdw->state_encoder_ok = 0; pvr2_trace(PVR2_TRACE_STBITS, "State bit %s <-- %s", 
"state_encoder_ok", (hdw->state_encoder_ok ? "true" : "false")); if (hdw->state_encoder_runok) { hdw->state_encoder_runok = 0; pvr2_trace(PVR2_TRACE_STBITS, "State bit %s <-- %s", "state_encoder_runok", (hdw->state_encoder_runok ? "true" : "false")); } pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Giving up on command." " This is normally recovered via a firmware" " reload and re-initialization; concern" " is only warranted if this happens repeatedly" " and rapidly."); break; } wrData[0] = 0x7; for (idx = 0; idx < arg_cnt_recv; idx++) { argp[idx] = rdData[idx+4]; } wrData[0] = 0x0; ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,1); if (ret) break; } while(0); LOCK_GIVE(hdw->ctl_lock); return ret; } static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd, int args, ...) { va_list vl; unsigned int idx; u32 data[12]; if (args > ARRAY_SIZE(data)) { pvr2_trace( PVR2_TRACE_ERROR_LEGS, "Failed to write cx23416 command" " - too many arguments" " (was given %u limit %lu)", args, (long unsigned) ARRAY_SIZE(data)); return -EINVAL; } va_start(vl, args); for (idx = 0; idx < args; idx++) { data[idx] = va_arg(vl, u32); } va_end(vl); return pvr2_encoder_cmd(hdw,cmd,args,0,data); } /* This implements some extra setup for the encoder that seems to be specific to the PVR USB2 hardware. */ static int pvr2_encoder_prep_config(struct pvr2_hdw *hdw) { int ret = 0; int encMisc3Arg = 0; #if 0 /* This inexplicable bit happens in the Hauppauge windows driver (for both 24xxx and 29xxx devices). However I currently see no difference in behavior with or without this stuff. Leave this here as a note of its existence, but don't use it. */ LOCK_TAKE(hdw->ctl_lock); do { u32 dat[1]; dat[0] = 0x80000640; pvr2_encoder_write_words(hdw,0x01fe,dat,1); pvr2_encoder_write_words(hdw,0x023e,dat,1); } while(0); LOCK_GIVE(hdw->ctl_lock); #endif /* Mike Isely <isely@pobox.com> 26-Jan-2006 The windows driver sends the following list of ENC_MISC commands (for both 24xxx and 29xxx devices). 
Meanings are not entirely clear, however without the ENC_MISC(3,1) command then we risk random perpetual video corruption whenever the video input breaks up for a moment (like when switching channels). */ #if 0 /* This ENC_MISC(5,0) command seems to hurt 29xxx sync performance on channel changes, but is not a problem on 24xxx devices. */ ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 5,0,0,0); #endif /* This ENC_MISC(3,encMisc3Arg) command is critical - without it there will eventually be video corruption. Also, the saa7115 case is strange - the Windows driver is passing 1 regardless of device type but if we have 1 for saa7115 devices the video turns sluggish. */ if (hdw->hdw_desc->flag_has_cx25840) { encMisc3Arg = 1; } else { encMisc3Arg = 0; } ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 3, encMisc3Arg,0,0); ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 8,0,0,0); #if 0 /* This ENC_MISC(4,1) command is poisonous, so it is commented out. But I'm leaving it here anyway to document its existence in the Windows driver. The effect of this command is that apps displaying the stream become sluggish with stuttering video. */ ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 4,1,0,0); #endif ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0); ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0); /* prevent the PTSs from slowly drifting away in the generated MPEG stream */ ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC, 2, 4, 1); return ret; } int pvr2_encoder_adjust(struct pvr2_hdw *hdw) { int ret; ret = cx2341x_update(hdw,pvr2_encoder_cmd, (hdw->enc_cur_valid ? 
&hdw->enc_cur_state : NULL), &hdw->enc_ctl_state); if (ret) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Error from cx2341x module code=%d",ret); } else { memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state, sizeof(struct cx2341x_mpeg_params)); hdw->enc_cur_valid = !0; } return ret; } int pvr2_encoder_configure(struct pvr2_hdw *hdw) { int ret; int val; pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure" " (cx2341x module)"); hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING; hdw->enc_ctl_state.width = hdw->res_hor_val; hdw->enc_ctl_state.height = hdw->res_ver_val; hdw->enc_ctl_state.is_50hz = ((hdw->std_mask_cur & V4L2_STD_525_60) ? 0 : 1); ret = 0; ret |= pvr2_encoder_prep_config(hdw); /* saa7115: 0xf0 */ val = 0xf0; if (hdw->hdw_desc->flag_has_cx25840) { /* ivtv cx25840: 0x140 */ val = 0x140; } if (!ret) ret = pvr2_encoder_vcmd( hdw,CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, val, val); /* setup firmware to notify us about some events (don't know why...) */ if (!ret) ret = pvr2_encoder_vcmd( hdw,CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, 0x10000000, 0xffffffff); if (!ret) ret = pvr2_encoder_vcmd( hdw,CX2341X_ENC_SET_VBI_LINE, 5, 0xffffffff,0,0,0,0); if (ret) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to configure cx23416"); return ret; } ret = pvr2_encoder_adjust(hdw); if (ret) return ret; ret = pvr2_encoder_vcmd( hdw, CX2341X_ENC_INITIALIZE_INPUT, 0); if (ret) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to initialize cx23416 video input"); return ret; } return 0; } int pvr2_encoder_start(struct pvr2_hdw *hdw) { int status; /* unmask some interrupts */ pvr2_write_register(hdw, 0x0048, 0xbfffffff); pvr2_encoder_vcmd(hdw,CX2341X_ENC_MUTE_VIDEO,1, hdw->input_val == PVR2_CVAL_INPUT_RADIO ? 
1 : 0); switch (hdw->active_stream_type) { case pvr2_config_vbi: status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2, 0x01,0x14); break; case pvr2_config_mpeg: status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2, 0,0x13); break; default: /* Unhandled cases for now */ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2, 0,0x13); break; } return status; } int pvr2_encoder_stop(struct pvr2_hdw *hdw) { int status; /* mask all interrupts */ pvr2_write_register(hdw, 0x0048, 0xffffffff); switch (hdw->active_stream_type) { case pvr2_config_vbi: status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3, 0x01,0x01,0x14); break; case pvr2_config_mpeg: status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3, 0x01,0,0x13); break; default: /* Unhandled cases for now */ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3, 0x01,0,0x13); break; } return status; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 70 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
tako0910/android_kernel_htc_m7wlj
fs/ubifs/compress.c
9952
6772
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * Copyright (C) 2006, 2007 University of Szeged, Hungary * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Adrian Hunter * Artem Bityutskiy (Битюцкий Артём) * Zoltan Sogor */ /* * This file provides a single place to access to compression and * decompression. */ #include <linux/crypto.h> #include "ubifs.h" /* Fake description object for the "none" compressor */ static struct ubifs_compressor none_compr = { .compr_type = UBIFS_COMPR_NONE, .name = "none", .capi_name = "", }; #ifdef CONFIG_UBIFS_FS_LZO static DEFINE_MUTEX(lzo_mutex); static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, .comp_mutex = &lzo_mutex, .name = "lzo", .capi_name = "lzo", }; #else static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, .name = "lzo", }; #endif #ifdef CONFIG_UBIFS_FS_ZLIB static DEFINE_MUTEX(deflate_mutex); static DEFINE_MUTEX(inflate_mutex); static struct ubifs_compressor zlib_compr = { .compr_type = UBIFS_COMPR_ZLIB, .comp_mutex = &deflate_mutex, .decomp_mutex = &inflate_mutex, .name = "zlib", .capi_name = "deflate", }; #else static struct ubifs_compressor zlib_compr = { .compr_type = UBIFS_COMPR_ZLIB, .name = "zlib", }; #endif /* All UBIFS compressors */ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; /** * ubifs_compress - compress data. 
* @in_buf: data to compress * @in_len: length of the data to compress * @out_buf: output buffer where compressed data should be stored * @out_len: output buffer length is returned here * @compr_type: type of compression to use on enter, actually used compression * type on exit * * This function compresses input buffer @in_buf of length @in_len and stores * the result in the output buffer @out_buf and the resulting length in * @out_len. If the input buffer does not compress, it is just copied to the * @out_buf. The same happens if @compr_type is %UBIFS_COMPR_NONE or if * compression error occurred. * * Note, if the input buffer was not compressed, it is copied to the output * buffer and %UBIFS_COMPR_NONE is returned in @compr_type. */ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, int *compr_type) { int err; struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; if (*compr_type == UBIFS_COMPR_NONE) goto no_compr; /* If the input data is small, do not even try to compress it */ if (in_len < UBIFS_MIN_COMPR_LEN) goto no_compr; if (compr->comp_mutex) mutex_lock(compr->comp_mutex); err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf, (unsigned int *)out_len); if (compr->comp_mutex) mutex_unlock(compr->comp_mutex); if (unlikely(err)) { ubifs_warn("cannot compress %d bytes, compressor %s, " "error %d, leave data uncompressed", in_len, compr->name, err); goto no_compr; } /* * If the data compressed only slightly, it is better to leave it * uncompressed to improve read speed. */ if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF) goto no_compr; return; no_compr: memcpy(out_buf, in_buf, in_len); *out_len = in_len; *compr_type = UBIFS_COMPR_NONE; } /** * ubifs_decompress - decompress data. 
* @in_buf: data to decompress * @in_len: length of the data to decompress * @out_buf: output buffer where decompressed data should * @out_len: output length is returned here * @compr_type: type of compression * * This function decompresses data from buffer @in_buf into buffer @out_buf. * The length of the uncompressed data is returned in @out_len. This functions * returns %0 on success or a negative error code on failure. */ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, int *out_len, int compr_type) { int err; struct ubifs_compressor *compr; if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { ubifs_err("invalid compression type %d", compr_type); return -EINVAL; } compr = ubifs_compressors[compr_type]; if (unlikely(!compr->capi_name)) { ubifs_err("%s compression is not compiled in", compr->name); return -EINVAL; } if (compr_type == UBIFS_COMPR_NONE) { memcpy(out_buf, in_buf, in_len); *out_len = in_len; return 0; } if (compr->decomp_mutex) mutex_lock(compr->decomp_mutex); err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf, (unsigned int *)out_len); if (compr->decomp_mutex) mutex_unlock(compr->decomp_mutex); if (err) ubifs_err("cannot decompress %d bytes, compressor %s, " "error %d", in_len, compr->name, err); return err; } /** * compr_init - initialize a compressor. * @compr: compressor description object * * This function initializes the requested compressor and returns zero in case * of success or a negative error code in case of failure. */ static int __init compr_init(struct ubifs_compressor *compr) { if (compr->capi_name) { compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0); if (IS_ERR(compr->cc)) { ubifs_err("cannot initialize compressor %s, error %ld", compr->name, PTR_ERR(compr->cc)); return PTR_ERR(compr->cc); } } ubifs_compressors[compr->compr_type] = compr; return 0; } /** * compr_exit - de-initialize a compressor. 
* @compr: compressor description object */ static void compr_exit(struct ubifs_compressor *compr) { if (compr->capi_name) crypto_free_comp(compr->cc); return; } /** * ubifs_compressors_init - initialize UBIFS compressors. * * This function initializes the compressor which were compiled in. Returns * zero in case of success and a negative error code in case of failure. */ int __init ubifs_compressors_init(void) { int err; err = compr_init(&lzo_compr); if (err) return err; err = compr_init(&zlib_compr); if (err) goto out_lzo; ubifs_compressors[UBIFS_COMPR_NONE] = &none_compr; return 0; out_lzo: compr_exit(&lzo_compr); return err; } /** * ubifs_compressors_exit - de-initialize UBIFS compressors. */ void ubifs_compressors_exit(void) { compr_exit(&lzo_compr); compr_exit(&zlib_compr); }
gpl-2.0
ccompiler4pic32/pic32-gcc
gcc/testsuite/gcc.c-torture/compile/980408-1.c
225
4083
typedef struct _RunlengthPacket { unsigned short red, green, blue, length; unsigned short index; } RunlengthPacket; typedef struct _Image { int status, temporary; char filename[1664 ]; long int filesize; int pipe; char magick[1664 ], *comments, *label, *text; unsigned int matte; unsigned int columns, rows, depth; unsigned int scene, number_scenes; char *montage, *directory; unsigned int colors; double gamma; float x_resolution, y_resolution; unsigned int mean_error_per_pixel; double normalized_mean_error, normalized_maximum_error; unsigned long total_colors; char *signature; unsigned int packets, runlength, packet_size; unsigned char *packed_pixels; long int magick_time; char magick_filename[1664 ]; unsigned int magick_columns, magick_rows; char *geometry, *page; unsigned int dispose, delay, iterations; unsigned int orphan; struct _Image *previous, *list, *next; } Image; Image *MinifyImage(Image *image) { Image *minified_image; register RunlengthPacket *q, *s, *s0, *s1, *s2, *s3; register unsigned int x; unsigned int blue, green, red; unsigned long total_matte, total_blue, total_green, total_red; unsigned short index; for (x=0; x < (image->columns-1); x+=2) { total_red=0; total_green=0; total_blue=0; total_matte=0; s=s0; total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ; s=s1; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; 
; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; s=s2; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; ; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; s=s3; total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ; red=(unsigned short) ((total_red+63) >> 7); green=(unsigned short) ((total_green+63) >> 7); blue=(unsigned short) ((total_blue+63) >> 7); index=(unsigned short) ((total_matte+63) >> 7); if ((red == q->red) && (green == q->green) && (blue == q->blue) && (index == q->index) && ((int) q->length < 65535L )) q->length++; } return(minified_image); }
gpl-2.0
thepasto/kernel_acer_salsa
drivers/net/gianfar.c
481
86291
/* * drivers/net/gianfar.c * * Gianfar Ethernet Driver * This driver is designed for the non-CPM ethernet controllers * on the 85xx and 83xx family of integrated processors * Based on 8260_io/fcc_enet.c * * Author: Andy Fleming * Maintainer: Kumar Gala * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> * * Copyright 2002-2009 Freescale Semiconductor, Inc. * Copyright 2007 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Gianfar: AKA Lambda Draconis, "Dragon" * RA 11 31 24.2 * Dec +69 19 52 * V 3.84 * B-V +1.62 * * Theory of operation * * The driver is initialized through of_device. Configuration information * is therefore conveyed through an OF-style device tree. * * The Gianfar Ethernet Controller uses a ring of buffer * descriptors. The beginning is indicated by a register * pointing to the physical address of the start of the ring. * The end is determined by a "wrap" bit being set in the * last descriptor of the ring. * * When a packet is received, the RXF bit in the * IEVENT register is set, triggering an interrupt when the * corresponding bit in the IMASK register is also set (if * interrupt coalescing is active, then the interrupt may not * happen immediately, but will wait until either a set number * of frames or amount of time have passed). In NAPI, the * interrupt handler will signal there is work to be done, and * exit. This method will start at the last known empty * descriptor, and process every subsequent descriptor until there * are none left with data (NAPI will stop after a set number of * packets to give time to other tasks, but will eventually * process all the packets). 
The data arrives inside a * pre-allocated skb, and so after the skb is passed up to the * stack, a new skb must be allocated, and the address field in * the buffer descriptor must be updated to indicate this new * skb. * * When the kernel requests that a packet be transmitted, the * driver starts where it left off last time, and points the * descriptor at the buffer which was passed in. The driver * then informs the DMA engine that there are packets ready to * be transmitted. Once the controller is finished transmitting * the packet, an interrupt may be triggered (under the same * conditions as for reception, but depending on the TXF bit). * The driver then cleans up the buffer. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/in.h> #include <linux/net_tstamp.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/crc32.h> #include <linux/mii.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #include <linux/of.h> #include "gianfar.h" #include "fsl_pq_mdio.h" #define TX_TIMEOUT (1*HZ) #undef BRIEF_GFAR_ERRORS #undef VERBOSE_GFAR_ERRORS const char gfar_driver_name[] = "Gianfar Ethernet"; const char gfar_driver_version[] = "1.3"; static int gfar_enet_open(struct net_device *dev); static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); struct sk_buff 
*gfar_new_skb(struct net_device *dev); static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, struct sk_buff *skb); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); static irqreturn_t gfar_transmit(int irq, void *dev_id); static irqreturn_t gfar_interrupt(int irq, void *dev_id); static void adjust_link(struct net_device *dev); static void init_registers(struct net_device *dev); static int init_phy(struct net_device *dev); static int gfar_probe(struct of_device *ofdev, const struct of_device_id *match); static int gfar_remove(struct of_device *ofdev); static void free_skb_resources(struct gfar_private *priv); static void gfar_set_multi(struct net_device *dev); static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); static void gfar_configure_serdes(struct net_device *dev); static int gfar_poll(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int amount_pull); static void gfar_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); void gfar_halt(struct net_device *dev); static void gfar_halt_nodisable(struct net_device *dev); void gfar_start(struct net_device *dev); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); MODULE_AUTHOR("Freescale Semiconductor, Inc"); MODULE_DESCRIPTION("Gianfar Ethernet Driver"); MODULE_LICENSE("GPL"); static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, dma_addr_t buf) { u32 lstatus; 
bdp->bufPtr = buf; lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT); if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1) lstatus |= BD_LFLAG(RXBD_WRAP); eieio(); bdp->lstatus = lstatus; } static int gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; struct rxbd8 *rxbdp; int i, j; for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; /* Initialize some variables in our dev structure */ tx_queue->num_txbdfree = tx_queue->tx_ring_size; tx_queue->dirty_tx = tx_queue->tx_bd_base; tx_queue->cur_tx = tx_queue->tx_bd_base; tx_queue->skb_curtx = 0; tx_queue->skb_dirtytx = 0; /* Initialize Transmit Descriptor Ring */ txbdp = tx_queue->tx_bd_base; for (j = 0; j < tx_queue->tx_ring_size; j++) { txbdp->lstatus = 0; txbdp->bufPtr = 0; txbdp++; } /* Set the last descriptor in the ring to indicate wrap */ txbdp--; txbdp->status |= TXBD_WRAP; } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; rx_queue->cur_rx = rx_queue->rx_bd_base; rx_queue->skb_currx = 0; rxbdp = rx_queue->rx_bd_base; for (j = 0; j < rx_queue->rx_ring_size; j++) { struct sk_buff *skb = rx_queue->rx_skbuff[j]; if (skb) { gfar_init_rxbdp(rx_queue, rxbdp, rxbdp->bufPtr); } else { skb = gfar_new_skb(ndev); if (!skb) { pr_err("%s: Can't allocate RX buffers\n", ndev->name); goto err_rxalloc_fail; } rx_queue->rx_skbuff[j] = skb; gfar_new_rxbdp(rx_queue, rxbdp, skb); } rxbdp++; } } return 0; err_rxalloc_fail: free_skb_resources(priv); return -ENOMEM; } static int gfar_alloc_skb_resources(struct net_device *ndev) { void *vaddr; dma_addr_t addr; int i, j, k; struct gfar_private *priv = netdev_priv(ndev); struct device *dev = &priv->ofdev->dev; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; priv->total_tx_ring_size = 0; for (i = 0; i < priv->num_tx_queues; i++) priv->total_tx_ring_size += 
priv->tx_queue[i]->tx_ring_size; priv->total_rx_ring_size = 0; for (i = 0; i < priv->num_rx_queues; i++) priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size; /* Allocate memory for the buffer descriptors */ vaddr = dma_alloc_coherent(dev, sizeof(struct txbd8) * priv->total_tx_ring_size + sizeof(struct rxbd8) * priv->total_rx_ring_size, &addr, GFP_KERNEL); if (!vaddr) { if (netif_msg_ifup(priv)) pr_err("%s: Could not allocate buffer descriptors!\n", ndev->name); return -ENOMEM; } for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; tx_queue->tx_bd_base = (struct txbd8 *) vaddr; tx_queue->tx_bd_dma_base = addr; tx_queue->dev = ndev; /* enet DMA only understands physical addresses */ addr += sizeof(struct txbd8) *tx_queue->tx_ring_size; vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size; } /* Start the rx descriptor ring where the tx ring leaves off */ for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; rx_queue->rx_bd_base = (struct rxbd8 *) vaddr; rx_queue->rx_bd_dma_base = addr; rx_queue->dev = ndev; addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size; } /* Setup the skbuff rings */ for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) * tx_queue->tx_ring_size, GFP_KERNEL); if (!tx_queue->tx_skbuff) { if (netif_msg_ifup(priv)) pr_err("%s: Could not allocate tx_skbuff\n", ndev->name); goto cleanup; } for (k = 0; k < tx_queue->tx_ring_size; k++) tx_queue->tx_skbuff[k] = NULL; } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) * rx_queue->rx_ring_size, GFP_KERNEL); if (!rx_queue->rx_skbuff) { if (netif_msg_ifup(priv)) pr_err("%s: Could not allocate rx_skbuff\n", ndev->name); goto cleanup; } for (j = 0; j < rx_queue->rx_ring_size; j++) rx_queue->rx_skbuff[j] = NULL; } if (gfar_init_bds(ndev)) goto 
cleanup; return 0; cleanup: free_skb_resources(priv); return -ENOMEM; } static void gfar_init_tx_rx_base(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 __iomem *baddr; int i; baddr = &regs->tbase0; for(i = 0; i < priv->num_tx_queues; i++) { gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base); baddr += 2; } baddr = &regs->rbase0; for(i = 0; i < priv->num_rx_queues; i++) { gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base); baddr += 2; } } static void gfar_init_mac(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 rctrl = 0; u32 tctrl = 0; u32 attrs = 0; /* write the tx/rx base registers */ gfar_init_tx_rx_base(priv); /* Configure the coalescing support */ gfar_configure_coalescing(priv, 0xFF, 0xFF); if (priv->rx_filer_enable) { rctrl |= RCTRL_FILREN; /* Program the RIR0 reg with the required distribution */ gfar_write(&regs->rir0, DEFAULT_RIR0); } if (priv->rx_csum_enable) rctrl |= RCTRL_CHECKSUMMING; if (priv->extended_hash) { rctrl |= RCTRL_EXTHASH; gfar_clear_exact_match(ndev); rctrl |= RCTRL_EMEN; } if (priv->padding) { rctrl &= ~RCTRL_PAL_MASK; rctrl |= RCTRL_PADDING(priv->padding); } /* Insert receive time stamps into padding alignment bytes */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) { rctrl &= ~RCTRL_PAL_MASK; rctrl |= RCTRL_PADDING(8); priv->padding = 8; } /* Enable HW time stamping if requested from user space */ if (priv->hwts_rx_en) rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; /* keep vlan related bits if it's enabled */ if (priv->vlgrp) { rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; tctrl |= TCTRL_VLINS; } /* Init rctrl based on our settings */ gfar_write(&regs->rctrl, rctrl); if (ndev->features & NETIF_F_IP_CSUM) tctrl |= TCTRL_INIT_CSUM; tctrl |= TCTRL_TXSCHED_PRIO; gfar_write(&regs->tctrl, tctrl); /* Set the extraction length and index */ attrs = ATTRELI_EL(priv->rx_stash_size) | ATTRELI_EI(priv->rx_stash_index); 
gfar_write(&regs->attreli, attrs); /* Start with defaults, and add stashing or locking * depending on the approprate variables */ attrs = ATTR_INIT_SETTINGS; if (priv->bd_stash_en) attrs |= ATTR_BDSTASH; if (priv->rx_stash_size != 0) attrs |= ATTR_BUFSTASH; gfar_write(&regs->attr, attrs); gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold); gfar_write(&regs->fifo_tx_starve, priv->fifo_starve); gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); } static struct net_device_stats *gfar_get_stats(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct netdev_queue *txq; unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0; unsigned long tx_packets = 0, tx_bytes = 0; int i = 0; for (i = 0; i < priv->num_rx_queues; i++) { rx_packets += priv->rx_queue[i]->stats.rx_packets; rx_bytes += priv->rx_queue[i]->stats.rx_bytes; rx_dropped += priv->rx_queue[i]->stats.rx_dropped; } dev->stats.rx_packets = rx_packets; dev->stats.rx_bytes = rx_bytes; dev->stats.rx_dropped = rx_dropped; for (i = 0; i < priv->num_tx_queues; i++) { txq = netdev_get_tx_queue(dev, i); tx_bytes += txq->tx_bytes; tx_packets += txq->tx_packets; } dev->stats.tx_bytes = tx_bytes; dev->stats.tx_packets = tx_packets; return &dev->stats; } static const struct net_device_ops gfar_netdev_ops = { .ndo_open = gfar_enet_open, .ndo_start_xmit = gfar_start_xmit, .ndo_stop = gfar_close, .ndo_change_mtu = gfar_change_mtu, .ndo_set_multicast_list = gfar_set_multi, .ndo_tx_timeout = gfar_timeout, .ndo_do_ioctl = gfar_ioctl, .ndo_get_stats = gfar_get_stats, .ndo_vlan_rx_register = gfar_vlan_rx_register, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = gfar_netpoll, #endif }; unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; void lock_rx_qs(struct gfar_private *priv) { int i = 0x0; for (i = 0; i < priv->num_rx_queues; i++) spin_lock(&priv->rx_queue[i]->rxlock); } 
/* Take every tx queue spinlock; pair with unlock_tx_qs(). */
void lock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

/* Release every rx queue spinlock taken by lock_rx_qs(). */
void unlock_rx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

/* Release every tx queue spinlock taken by lock_tx_qs(). */
void unlock_tx_qs(struct gfar_private *priv)
{
	int i = 0x0;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return priv->vlgrp || priv->rx_csum_enable ||
		(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

/* Free the per-queue tx bookkeeping structures (kfree(NULL) is a no-op). */
static void free_tx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

/* Free the per-queue rx bookkeeping structures (kfree(NULL) is a no-op). */
static void free_rx_pointers(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

/* Unmap every group register region mapped by gfar_parse_group(). */
static void unmap_group_regs(struct gfar_private *priv)
{
	int i = 0;

	/* walk all MAXGROUPS slots: unused slots hold NULL regs */
	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

/* Disable NAPI polling for every interrupt group. */
static void disable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

/* Enable NAPI polling for every interrupt group. */
static void enable_napi(struct gfar_private *priv)
{
	int i = 0;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

/*
 * Initialise one interrupt group from a device-tree node: map its
 * registers, resolve its IRQs, and read the rx/tx queue bit maps.
 * Increments priv->num_grps on success.  Returns 0 or a negative errno.
 * NOTE(review): on the -EINVAL path the of_iomap() mapping is not
 * undone here; the caller's err_grp_init path (unmap_group_regs) is
 * presumably relied on to clean up -- confirm against callers.
 */
static int gfar_parse_group(struct device_node *np,
		struct gfar_private *priv, const char *model)
{
	u32 *queue_mask;

	priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
	if (!priv->gfargrp[priv->num_grps].regs)
		return -ENOMEM;

	priv->gfargrp[priv->num_grps].interruptTransmit =
			irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		priv->gfargrp[priv->num_grps].interruptReceive =
			irq_of_parse_and_map(np, 1);
		priv->gfargrp[priv->num_grps].interruptError =
			irq_of_parse_and_map(np,2);
		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
			priv->gfargrp[priv->num_grps].interruptError < 0) {
			return -EINVAL;
		}
	}

	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
	priv->gfargrp[priv->num_grps].priv = priv;
	spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
	if(priv->mode == MQ_MG_MODE) {
		/* multi-group mode: queue routing comes from the DT,
		 * falling back to a default mapping shifted per group */
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].rx_bit_map =
			queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		priv->gfargrp[priv->num_grps].tx_bit_map =
			queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		/* single group owns every queue */
		priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
		priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

/*
 * Parse the device-tree node for this controller and allocate/populate
 * the net_device and gfar_private structures.  (Function continues past
 * the end of this span.)
 */
static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
				num_tx_qs, MAX_TX_QS);
		printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ?
*rx_queues : 1; if (num_rx_qs > MAX_RX_QS) { printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", num_tx_qs, MAX_TX_QS); printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n"); return -EINVAL; } *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs); dev = *pdev; if (NULL == dev) return -ENOMEM; priv = netdev_priv(dev); priv->node = ofdev->dev.of_node; priv->ndev = dev; dev->num_tx_queues = num_tx_qs; dev->real_num_tx_queues = num_tx_qs; priv->num_tx_queues = num_tx_qs; priv->num_rx_queues = num_rx_qs; priv->num_grps = 0x0; model = of_get_property(np, "model", NULL); for (i = 0; i < MAXGROUPS; i++) priv->gfargrp[i].regs = NULL; /* Parse and initialize group specific information */ if (of_device_is_compatible(np, "fsl,etsec2")) { priv->mode = MQ_MG_MODE; for_each_child_of_node(np, child) { err = gfar_parse_group(child, priv, model); if (err) goto err_grp_init; } } else { priv->mode = SQ_SG_MODE; err = gfar_parse_group(np, priv, model); if(err) goto err_grp_init; } for (i = 0; i < priv->num_tx_queues; i++) priv->tx_queue[i] = NULL; for (i = 0; i < priv->num_rx_queues; i++) priv->rx_queue[i] = NULL; for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( sizeof (struct gfar_priv_tx_q), GFP_KERNEL); if (!priv->tx_queue[i]) { err = -ENOMEM; goto tx_alloc_failed; } priv->tx_queue[i]->tx_skbuff = NULL; priv->tx_queue[i]->qindex = i; priv->tx_queue[i]->dev = dev; spin_lock_init(&(priv->tx_queue[i]->txlock)); } for (i = 0; i < priv->num_rx_queues; i++) { priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( sizeof (struct gfar_priv_rx_q), GFP_KERNEL); if (!priv->rx_queue[i]) { err = -ENOMEM; goto rx_alloc_failed; } priv->rx_queue[i]->rx_skbuff = NULL; priv->rx_queue[i]->qindex = i; priv->rx_queue[i]->dev = dev; spin_lock_init(&(priv->rx_queue[i]->rxlock)); } stash = of_get_property(np, "bd-stash", NULL); if (stash) { priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; priv->bd_stash_en = 1; } stash_len = 
of_get_property(np, "rx-stash-len", NULL); if (stash_len) priv->rx_stash_size = *stash_len; stash_idx = of_get_property(np, "rx-stash-idx", NULL); if (stash_idx) priv->rx_stash_index = *stash_idx; if (stash_len || stash_idx) priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING; mac_addr = of_get_mac_address(np); if (mac_addr) memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN); if (model && !strcasecmp(model, "TSEC")) priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | FSL_GIANFAR_DEV_HAS_MULTI_INTR; if (model && !strcasecmp(model, "eTSEC")) priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | FSL_GIANFAR_DEV_HAS_MULTI_INTR | FSL_GIANFAR_DEV_HAS_PADDING | FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | FSL_GIANFAR_DEV_HAS_TIMER; ctype = of_get_property(np, "phy-connection-type", NULL); /* We only care about rgmii-id. The rest are autodetected */ if (ctype && !strcmp(ctype, "rgmii-id")) priv->interface = PHY_INTERFACE_MODE_RGMII_ID; else priv->interface = PHY_INTERFACE_MODE_MII; if (of_get_property(np, "fsl,magic-packet", NULL)) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; priv->phy_node = of_parse_phandle(np, "phy-handle", 0); /* Find the TBI PHY. 
If it's not there, we don't support SGMII */ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0); return 0; rx_alloc_failed: free_rx_pointers(priv); tx_alloc_failed: free_tx_pointers(priv); err_grp_init: unmap_group_regs(priv); free_netdev(dev); return err; } static int gfar_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct hwtstamp_config config; struct gfar_private *priv = netdev_priv(netdev); if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; /* reserved for future extensions */ if (config.flags) return -EINVAL; switch (config.tx_type) { case HWTSTAMP_TX_OFF: priv->hwts_tx_en = 0; break; case HWTSTAMP_TX_ON: if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) return -ERANGE; priv->hwts_tx_en = 1; break; default: return -ERANGE; } switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: if (priv->hwts_rx_en) { stop_gfar(netdev); priv->hwts_rx_en = 0; startup_gfar(netdev); } break; default: if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) return -ERANGE; if (!priv->hwts_rx_en) { stop_gfar(netdev); priv->hwts_rx_en = 1; startup_gfar(netdev); } config.rx_filter = HWTSTAMP_FILTER_ALL; break; } return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } /* Ioctl MII Interface */ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct gfar_private *priv = netdev_priv(dev); if (!netif_running(dev)) return -EINVAL; if (cmd == SIOCSHWTSTAMP) return gfar_hwtstamp_ioctl(dev, rq, cmd); if (!priv->phydev) return -ENODEV; return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); } static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs) { unsigned int new_bit_map = 0x0; int mask = 0x1 << (max_qs - 1), i; for (i = 0; i < max_qs; i++) { if (bit_map & mask) new_bit_map = new_bit_map + (1 << i); mask = mask >> 0x1; } return new_bit_map; } static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, u32 class) { u32 rqfpr = FPR_FILER_MASK; u32 rqfcr = 0x0; rqfar--; rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; ftp_rqfpr[rqfar] = rqfpr; ftp_rqfcr[rqfar] = rqfcr; gfar_write_filer(priv, rqfar, rqfcr, rqfpr); rqfar--; rqfcr = RQFCR_CMP_NOMATCH; ftp_rqfpr[rqfar] = rqfpr; ftp_rqfcr[rqfar] = rqfcr; gfar_write_filer(priv, rqfar, rqfcr, rqfpr); rqfar--; rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; rqfpr = class; ftp_rqfcr[rqfar] = rqfcr; ftp_rqfpr[rqfar] = rqfpr; gfar_write_filer(priv, rqfar, rqfcr, rqfpr); rqfar--; rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; rqfpr = class; ftp_rqfcr[rqfar] = rqfcr; ftp_rqfpr[rqfar] = rqfpr; gfar_write_filer(priv, rqfar, rqfcr, rqfpr); return rqfar; } static void gfar_init_filer_table(struct gfar_private *priv) { int i = 0x0; u32 rqfar = MAX_FILER_IDX; u32 rqfcr = 0x0; u32 rqfpr = FPR_FILER_MASK; /* Default rule */ rqfcr = RQFCR_CMP_MATCH; ftp_rqfcr[rqfar] = rqfcr; ftp_rqfpr[rqfar] = rqfpr; gfar_write_filer(priv, rqfar, rqfcr, rqfpr); rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP); rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP); rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4); rqfar = 
cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP); rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP); /* cur_filer_idx indicated the fisrt non-masked rule */ priv->cur_filer_idx = rqfar; /* Rest are masked rules */ rqfcr = RQFCR_CMP_NOMATCH; for (i = 0; i < rqfar; i++) { ftp_rqfcr[i] = rqfcr; ftp_rqfpr[i] = rqfpr; gfar_write_filer(priv, i, rqfcr, rqfpr); } } /* Set up the ethernet device structure, private data, * and anything else we need before we start */ static int gfar_probe(struct of_device *ofdev, const struct of_device_id *match) { u32 tempval; struct net_device *dev = NULL; struct gfar_private *priv = NULL; struct gfar __iomem *regs = NULL; int err = 0, i, grp_idx = 0; int len_devname; u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0; u32 isrg = 0; u32 __iomem *baddr; err = gfar_of_init(ofdev, &dev); if (err) return err; priv = netdev_priv(dev); priv->ndev = dev; priv->ofdev = ofdev; priv->node = ofdev->dev.of_node; SET_NETDEV_DEV(dev, &ofdev->dev); spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); dev_set_drvdata(&ofdev->dev, priv); regs = priv->gfargrp[0].regs; /* Stop the DMA engine now, in case it was running before */ /* (The firmware could have used it, and left it running). */ gfar_halt(dev); /* Reset MAC layer */ gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); /* We need to delay at least 3 TX clocks */ udelay(2); tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); gfar_write(&regs->maccfg1, tempval); /* Initialize MACCFG2. 
*/ gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS); /* Initialize ECNTRL */ gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); /* Set the dev->base_addr to the gfar reg region */ dev->base_addr = (unsigned long) regs; SET_NETDEV_DEV(dev, &ofdev->dev); /* Fill in the dev structure */ dev->watchdog_timeo = TX_TIMEOUT; dev->mtu = 1500; dev->netdev_ops = &gfar_netdev_ops; dev->ethtool_ops = &gfar_ethtool_ops; /* Register for napi ...We are registering NAPI for each grp */ for (i = 0; i < priv->num_grps; i++) netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT); if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { priv->rx_csum_enable = 1; dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; } else priv->rx_csum_enable = 0; priv->vlgrp = NULL; if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { priv->extended_hash = 1; priv->hash_width = 9; priv->hash_regs[0] = &regs->igaddr0; priv->hash_regs[1] = &regs->igaddr1; priv->hash_regs[2] = &regs->igaddr2; priv->hash_regs[3] = &regs->igaddr3; priv->hash_regs[4] = &regs->igaddr4; priv->hash_regs[5] = &regs->igaddr5; priv->hash_regs[6] = &regs->igaddr6; priv->hash_regs[7] = &regs->igaddr7; priv->hash_regs[8] = &regs->gaddr0; priv->hash_regs[9] = &regs->gaddr1; priv->hash_regs[10] = &regs->gaddr2; priv->hash_regs[11] = &regs->gaddr3; priv->hash_regs[12] = &regs->gaddr4; priv->hash_regs[13] = &regs->gaddr5; priv->hash_regs[14] = &regs->gaddr6; priv->hash_regs[15] = &regs->gaddr7; } else { priv->extended_hash = 0; priv->hash_width = 8; priv->hash_regs[0] = &regs->gaddr0; priv->hash_regs[1] = &regs->gaddr1; priv->hash_regs[2] = &regs->gaddr2; priv->hash_regs[3] = &regs->gaddr3; priv->hash_regs[4] = &regs->gaddr4; priv->hash_regs[5] = &regs->gaddr5; priv->hash_regs[6] = &regs->gaddr6; priv->hash_regs[7] = &regs->gaddr7; } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 
priv->padding = DEFAULT_PADDING; else priv->padding = 0; if (dev->features & NETIF_F_IP_CSUM || priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) dev->hard_header_len += GMAC_FCB_LEN; /* Program the isrg regs only if number of grps > 1 */ if (priv->num_grps > 1) { baddr = &regs->isrg0; for (i = 0; i < priv->num_grps; i++) { isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); gfar_write(baddr, isrg); baddr++; isrg = 0x0; } } /* Need to reverse the bit maps as bit_map's MSB is q0 * but, for_each_set_bit parses from right to left, which * basically reverses the queue numbers */ for (i = 0; i< priv->num_grps; i++) { priv->gfargrp[i].tx_bit_map = reverse_bitmap( priv->gfargrp[i].tx_bit_map, MAX_TX_QS); priv->gfargrp[i].rx_bit_map = reverse_bitmap( priv->gfargrp[i].rx_bit_map, MAX_RX_QS); } /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, * also assign queues to groups */ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { priv->gfargrp[grp_idx].num_rx_queues = 0x0; for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map, priv->num_rx_queues) { priv->gfargrp[grp_idx].num_rx_queues++; priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx]; rstat = rstat | (RSTAT_CLEAR_RHALT >> i); rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i); } priv->gfargrp[grp_idx].num_tx_queues = 0x0; for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map, priv->num_tx_queues) { priv->gfargrp[grp_idx].num_tx_queues++; priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx]; tstat = tstat | (TSTAT_CLEAR_THALT >> i); tqueue = tqueue | (TQUEUE_EN0 >> i); } priv->gfargrp[grp_idx].rstat = rstat; priv->gfargrp[grp_idx].tstat = tstat; rstat = tstat =0; } gfar_write(&regs->rqueue, rqueue); gfar_write(&regs->tqueue, tqueue); priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; 
priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE; priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE; priv->tx_queue[i]->txic = DEFAULT_TXIC; } for (i = 0; i < priv->num_rx_queues; i++) { priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE; priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE; priv->rx_queue[i]->rxic = DEFAULT_RXIC; } /* enable filer if using multiple RX queues*/ if(priv->num_rx_queues > 1) priv->rx_filer_enable = 1; /* Enable most messages by default */ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; /* Carrier starts down, phylib will bring it up */ netif_carrier_off(dev); err = register_netdev(dev); if (err) { printk(KERN_ERR "%s: Cannot register net device, aborting.\n", dev->name); goto register_fail; } device_init_wakeup(&dev->dev, priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); /* fill out IRQ number and name fields */ len_devname = strlen(dev->name); for (i = 0; i < priv->num_grps; i++) { strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name, len_devname); if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { strncpy(&priv->gfargrp[i].int_name_tx[len_devname], "_g", sizeof("_g")); priv->gfargrp[i].int_name_tx[ strlen(priv->gfargrp[i].int_name_tx)] = i+48; strncpy(&priv->gfargrp[i].int_name_tx[strlen( priv->gfargrp[i].int_name_tx)], "_tx", sizeof("_tx") + 1); strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name, len_devname); strncpy(&priv->gfargrp[i].int_name_rx[len_devname], "_g", sizeof("_g")); priv->gfargrp[i].int_name_rx[ strlen(priv->gfargrp[i].int_name_rx)] = i+48; strncpy(&priv->gfargrp[i].int_name_rx[strlen( priv->gfargrp[i].int_name_rx)], "_rx", sizeof("_rx") + 1); strncpy(&priv->gfargrp[i].int_name_er[0], dev->name, len_devname); strncpy(&priv->gfargrp[i].int_name_er[len_devname], "_g", sizeof("_g")); priv->gfargrp[i].int_name_er[strlen( priv->gfargrp[i].int_name_er)] = i+48; strncpy(&priv->gfargrp[i].int_name_er[strlen(\ priv->gfargrp[i].int_name_er)], "_er", sizeof("_er") + 1); } else 
priv->gfargrp[i].int_name_tx[len_devname] = '\0'; } /* Initialize the filer table */ gfar_init_filer_table(priv); /* Create all the sysfs files */ gfar_init_sysfs(dev); /* Print out the device info */ printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr); /* Even more device info helps when determining which kernel */ /* provided which set of benchmarks. */ printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); for (i = 0; i < priv->num_rx_queues; i++) printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n", dev->name, i, priv->rx_queue[i]->rx_ring_size); for(i = 0; i < priv->num_tx_queues; i++) printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", dev->name, i, priv->tx_queue[i]->tx_ring_size); return 0; register_fail: unmap_group_regs(priv); free_tx_pointers(priv); free_rx_pointers(priv); if (priv->phy_node) of_node_put(priv->phy_node); if (priv->tbi_node) of_node_put(priv->tbi_node); free_netdev(dev); return err; } static int gfar_remove(struct of_device *ofdev) { struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); if (priv->phy_node) of_node_put(priv->phy_node); if (priv->tbi_node) of_node_put(priv->tbi_node); dev_set_drvdata(&ofdev->dev, NULL); unregister_netdev(priv->ndev); unmap_group_regs(priv); free_netdev(priv->ndev); return 0; } #ifdef CONFIG_PM static int gfar_suspend(struct device *dev) { struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); netif_device_detach(ndev); if (netif_running(ndev)) { local_irq_save(flags); lock_tx_qs(priv); lock_rx_qs(priv); gfar_halt_nodisable(ndev); /* Disable Tx, and Rx if wake-on-LAN is disabled. 
*/ tempval = gfar_read(&regs->maccfg1); tempval &= ~MACCFG1_TX_EN; if (!magic_packet) tempval &= ~MACCFG1_RX_EN; gfar_write(&regs->maccfg1, tempval); unlock_rx_qs(priv); unlock_tx_qs(priv); local_irq_restore(flags); disable_napi(priv); if (magic_packet) { /* Enable interrupt on Magic Packet */ gfar_write(&regs->imask, IMASK_MAG); /* Enable Magic Packet mode */ tempval = gfar_read(&regs->maccfg2); tempval |= MACCFG2_MPEN; gfar_write(&regs->maccfg2, tempval); } else { phy_stop(priv->phydev); } } return 0; } static int gfar_resume(struct device *dev) { struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); if (!netif_running(ndev)) { netif_device_attach(ndev); return 0; } if (!magic_packet && priv->phydev) phy_start(priv->phydev); /* Disable Magic Packet mode, in case something * else woke us up. 
*/ local_irq_save(flags); lock_tx_qs(priv); lock_rx_qs(priv); tempval = gfar_read(&regs->maccfg2); tempval &= ~MACCFG2_MPEN; gfar_write(&regs->maccfg2, tempval); gfar_start(ndev); unlock_rx_qs(priv); unlock_tx_qs(priv); local_irq_restore(flags); netif_device_attach(ndev); enable_napi(priv); return 0; } static int gfar_restore(struct device *dev) { struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; if (!netif_running(ndev)) return 0; gfar_init_bds(ndev); init_registers(ndev); gfar_set_mac_address(ndev); gfar_init_mac(ndev); gfar_start(ndev); priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; if (priv->phydev) phy_start(priv->phydev); netif_device_attach(ndev); enable_napi(priv); return 0; } static struct dev_pm_ops gfar_pm_ops = { .suspend = gfar_suspend, .resume = gfar_resume, .freeze = gfar_suspend, .thaw = gfar_resume, .restore = gfar_restore, }; #define GFAR_PM_OPS (&gfar_pm_ops) #else #define GFAR_PM_OPS NULL #endif /* Reads the controller's registers to determine what interface * connects it to the PHY. */ static phy_interface_t gfar_get_interface(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 ecntrl; ecntrl = gfar_read(&regs->ecntrl); if (ecntrl & ECNTRL_SGMII_MODE) return PHY_INTERFACE_MODE_SGMII; if (ecntrl & ECNTRL_TBI_MODE) { if (ecntrl & ECNTRL_REDUCED_MODE) return PHY_INTERFACE_MODE_RTBI; else return PHY_INTERFACE_MODE_TBI; } if (ecntrl & ECNTRL_REDUCED_MODE) { if (ecntrl & ECNTRL_REDUCED_MII_MODE) return PHY_INTERFACE_MODE_RMII; else { phy_interface_t interface = priv->interface; /* * This isn't autodetected right now, so it must * be set by the device tree or platform code. 
*/ if (interface == PHY_INTERFACE_MODE_RGMII_ID) return PHY_INTERFACE_MODE_RGMII_ID; return PHY_INTERFACE_MODE_RGMII; } } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) return PHY_INTERFACE_MODE_GMII; return PHY_INTERFACE_MODE_MII; } /* Initializes driver's PHY state, and attaches to the PHY. * Returns 0 on success. */ static int init_phy(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); uint gigabit_support = priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? SUPPORTED_1000baseT_Full : 0; phy_interface_t interface; priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; interface = gfar_get_interface(dev); priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, interface); if (!priv->phydev) priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, interface); if (!priv->phydev) { dev_err(&dev->dev, "could not attach to PHY\n"); return -ENODEV; } if (interface == PHY_INTERFACE_MODE_SGMII) gfar_configure_serdes(dev); /* Remove any features not supported by the controller */ priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); priv->phydev->advertising = priv->phydev->supported; return 0; } /* * Initialize TBI PHY interface for communicating with the * SERDES lynx PHY on the chip. We communicate with this PHY * through the MDIO bus on each controller, treating it as a * "normal" PHY at the address found in the TBIPA register. We assume * that the TBIPA register is valid. Either the MDIO bus code will set * it to a value that doesn't conflict with other PHYs on the bus, or the * value doesn't matter, as there are no other PHYs on the bus. 
*/ static void gfar_configure_serdes(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct phy_device *tbiphy; if (!priv->tbi_node) { dev_warn(&dev->dev, "error: SGMII mode requires that the " "device tree specify a tbi-handle\n"); return; } tbiphy = of_phy_find_device(priv->tbi_node); if (!tbiphy) { dev_err(&dev->dev, "error: Could not get TBI device\n"); return; } /* * If the link is already up, we must already be ok, and don't need to * configure and reset the TBI<->SerDes link. Maybe U-Boot configured * everything for us? Resetting it takes the link down and requires * several seconds for it to come back. */ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) return; /* Single clk mode, mii mode off(for serdes communication) */ phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); phy_write(tbiphy, MII_ADVERTISE, ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM); phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); } static void init_registers(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = NULL; int i = 0; for (i = 0; i < priv->num_grps; i++) { regs = priv->gfargrp[i].regs; /* Clear IEVENT */ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); /* Initialize IMASK */ gfar_write(&regs->imask, IMASK_INIT_CLEAR); } regs = priv->gfargrp[0].regs; /* Init hash registers to zero */ gfar_write(&regs->igaddr0, 0); gfar_write(&regs->igaddr1, 0); gfar_write(&regs->igaddr2, 0); gfar_write(&regs->igaddr3, 0); gfar_write(&regs->igaddr4, 0); gfar_write(&regs->igaddr5, 0); gfar_write(&regs->igaddr6, 0); gfar_write(&regs->igaddr7, 0); gfar_write(&regs->gaddr0, 0); gfar_write(&regs->gaddr1, 0); gfar_write(&regs->gaddr2, 0); gfar_write(&regs->gaddr3, 0); gfar_write(&regs->gaddr4, 0); gfar_write(&regs->gaddr5, 0); gfar_write(&regs->gaddr6, 0); gfar_write(&regs->gaddr7, 0); /* Zero out the rmon mib registers if it has them */ if (priv->device_flags & 
FSL_GIANFAR_DEV_HAS_RMON) { memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib)); /* Mask off the CAM interrupts */ gfar_write(&regs->rmon.cam1, 0xffffffff); gfar_write(&regs->rmon.cam2, 0xffffffff); } /* Initialize the max receive buffer length */ gfar_write(&regs->mrblr, priv->rx_buffer_size); /* Initialize the Minimum Frame Length Register */ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS); } /* Halt the receive and transmit queues */ static void gfar_halt_nodisable(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = NULL; u32 tempval; int i = 0; for (i = 0; i < priv->num_grps; i++) { regs = priv->gfargrp[i].regs; /* Mask all interrupts */ gfar_write(&regs->imask, IMASK_INIT_CLEAR); /* Clear all interrupts */ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); } regs = priv->gfargrp[0].regs; /* Stop the DMA, and wait for it to stop */ tempval = gfar_read(&regs->dmactrl); if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != (DMACTRL_GRS | DMACTRL_GTS)) { tempval |= (DMACTRL_GRS | DMACTRL_GTS); gfar_write(&regs->dmactrl, tempval); spin_event_timeout(((gfar_read(&regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) == (IEVENT_GRSC | IEVENT_GTSC)), -1, 0); } } /* Halt the receive and transmit queues */ void gfar_halt(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; gfar_halt_nodisable(dev); /* Disable Rx and Tx */ tempval = gfar_read(&regs->maccfg1); tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); gfar_write(&regs->maccfg1, tempval); } static void free_grp_irqs(struct gfar_priv_grp *grp) { free_irq(grp->interruptError, grp); free_irq(grp->interruptTransmit, grp); free_irq(grp->interruptReceive, grp); } void stop_gfar(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); unsigned long flags; int i; phy_stop(priv->phydev); /* Lock it down */ local_irq_save(flags); lock_tx_qs(priv); lock_rx_qs(priv); gfar_halt(dev); unlock_rx_qs(priv); 
unlock_tx_qs(priv); local_irq_restore(flags); /* Free the IRQs */ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { for (i = 0; i < priv->num_grps; i++) free_grp_irqs(&priv->gfargrp[i]); } else { for (i = 0; i < priv->num_grps; i++) free_irq(priv->gfargrp[i].interruptTransmit, &priv->gfargrp[i]); } free_skb_resources(priv); } static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue) { struct txbd8 *txbdp; struct gfar_private *priv = netdev_priv(tx_queue->dev); int i, j; txbdp = tx_queue->tx_bd_base; for (i = 0; i < tx_queue->tx_ring_size; i++) { if (!tx_queue->tx_skbuff[i]) continue; dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, txbdp->length, DMA_TO_DEVICE); txbdp->lstatus = 0; for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags; j++) { txbdp++; dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, txbdp->length, DMA_TO_DEVICE); } txbdp++; dev_kfree_skb_any(tx_queue->tx_skbuff[i]); tx_queue->tx_skbuff[i] = NULL; } kfree(tx_queue->tx_skbuff); } static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) { struct rxbd8 *rxbdp; struct gfar_private *priv = netdev_priv(rx_queue->dev); int i; rxbdp = rx_queue->rx_bd_base; for (i = 0; i < rx_queue->rx_ring_size; i++) { if (rx_queue->rx_skbuff[i]) { dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr, priv->rx_buffer_size, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_queue->rx_skbuff[i]); rx_queue->rx_skbuff[i] = NULL; } rxbdp->lstatus = 0; rxbdp->bufPtr = 0; rxbdp++; } kfree(rx_queue->rx_skbuff); } /* If there are any tx skbs or rx skbs still around, free them. 
 * Then free tx_skbuff and rx_skbuff */
/*
 * Free all per-queue skb arrays and then the single coherent DMA region
 * that backs every TX and RX buffer-descriptor ring (allocated in one
 * chunk, anchored at tx_queue[0]).  Also drains the skb recycle list.
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_skbuff)
			free_skb_rx_queue(rx_queue);
	}

	/* TX and RX BD rings live in one contiguous coherent allocation */
	dma_free_coherent(&priv->ofdev->dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			priv->tx_queue[0]->tx_bd_base,
			priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}

/*
 * Enable RX/TX MACs, kick the DMA engines of every interrupt group and
 * unmask their interrupts.  Caller must have set up rings/registers first.
 */
void gfar_start(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx and Tx in MACCFG1 */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
}

/*
 * Program interrupt-coalescing registers.  The single txic/rxic pair is
 * always programmed (backward compatibility); in multi-group/multi-queue
 * mode the per-queue txic0../rxic0.. registers are programmed for each
 * queue selected by tx_mask/rx_mask.
 */
void gfar_configure_coalescing(struct gfar_private *priv,
	unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i = 0;

	/* Backward compatible case ---- even if we enable
	 * multiple queues, there's only single reg to program */
	gfar_write(&regs->txic, 0);
	if (likely(priv->tx_queue[0]->txcoalescing))
		gfar_write(&regs->txic, priv->tx_queue[0]->txic);

	gfar_write(&regs->rxic, 0);
	if (unlikely(priv->rx_queue[0]->rxcoalescing))
		gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);

	if (priv->mode == MQ_MG_MODE) {
		/*
		 * NOTE(review): the zeroing write sits inside the
		 * "coalescing enabled" branch, so a queue with coalescing
		 * disabled never has its per-queue register cleared
		 * (unlike the single-reg path above, which always writes
		 * 0 first).  Looks inconsistent — confirm against the
		 * eTSEC manual / upstream gianfar before relying on it.
		 */
		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			if (likely(priv->tx_queue[i]->txcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
			}
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			if (likely(priv->rx_queue[i]->rxcoalescing)) {
				gfar_write(baddr + i, 0);
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
			}
		}
	}
}

/*
 * Request the IRQs for one interrupt group.  Devices with split
 * interrupts get three handlers (error/tx/rx); others get a single
 * combined handler.  On failure, already-requested IRQs of this group
 * are released via the unwind labels.  Returns 0 or negative errno.
 */
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
	struct gfar_private *priv = grp->priv;
	struct net_device *dev = priv->ndev;
	int err;

	/* If the device has multiple interrupts, register for
	 * them.  Otherwise, only register for the one */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		/* Install our interrupt handlers for Error,
		 * Transmit, and Receive */
		if ((err = request_irq(grp->interruptError, gfar_error, 0,
				grp->int_name_er, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptError);
			goto err_irq_fail;
		}

		if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
				0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto tx_irq_fail;
		}

		if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
				grp->int_name_rx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptReceive);
			goto rx_irq_fail;
		}
	} else {
		if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
				0, grp->int_name_tx, grp)) < 0) {
			if (netif_msg_intr(priv))
				printk(KERN_ERR "%s: Can't get IRQ %d\n",
					dev->name, grp->interruptTransmit);
			goto err_irq_fail;
		}
	}

	return 0;

rx_irq_fail:
	free_irq(grp->interruptTransmit, grp);
tx_irq_fail:
	free_irq(grp->interruptError, grp);
err_irq_fail:
	return err;

}

/* Bring the controller up and running */
/*
 * Full startup: mask group interrupts, allocate skb/BD resources, init
 * the MAC, register per-group IRQs (unwinding on partial failure), then
 * start DMA, the PHY and coalescing.  Returns 0 or negative errno.
 */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = NULL;
	int err, i, j;

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}

	regs = priv->gfargrp[0].regs;
	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_mac(ndev);

	for (i = 0; i < priv->num_grps; i++) {
		err = register_grp_irqs(&priv->gfargrp[i]);
		if (err) {
			/* free IRQs of the groups registered so far */
			for (j = 0; j < i; j++)
				free_grp_irqs(&priv->gfargrp[j]);
			goto irq_fail;
		}
	}

	/* Start the controller */
	gfar_start(ndev);

	phy_start(priv->phydev);

	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	return 0;

irq_fail:
	free_skb_resources(priv);
	return err;
}

/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int err;

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

	gfar_set_mac_address(dev);

	err = init_phy(dev);

	if (err) {
		disable_napi(priv);
		return err;
	}

	err = startup_gfar(dev);
	if (err) {
		disable_napi(priv);
		return err;
	}

	netif_tx_start_all_queues(dev);

	device_set_wakeup_enable(&dev->dev, priv->wol_en);

	return err;
}

/* Prepend a zeroed frame control block (FCB) to the skb headroom. */
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

/*
 * Fill the TX FCB for hardware checksum offload: protocol flags, the
 * pseudo-header checksum already computed by the stack, and the l3/l4
 * header offsets relative to the FCB.
 */
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
	u8 flags = 0;

	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide */
	flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is */
	/* And provide the already calculated phcs */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = udp_hdr(skb)->check;
	} else
		fcb->phcs = tcp_hdr(skb)->check;

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr */
	fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

/* Mark the FCB for hardware VLAN tag insertion with the skb's tag. */
void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = vlan_tx_tag_get(skb);
}

/* Advance a TxBD pointer by 'stride' entries, wrapping around the ring. */
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
			       struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

/* Advance a TxBD pointer by one entry, wrapping around the ring. */
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
		int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* This is called by the kernel when a frame is ready for transmission.
 */
/* It is pointed to by the dev->hard_start_xmit function pointer */
/*
 * Queue one skb on its mapped TX ring: optionally re-allocate headroom
 * for the FCB, map head and fragments for DMA, build the BD chain
 * (plus one extra BD when hardware timestamping is requested), then set
 * the READY bit on the first BD last, under txlock, and kick the DMA.
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0, do_tstamp = 0;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, length;
	union skb_shared_tx *shtx;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;
	shtx = skb_tx(skb);

	/* check if time stamp should be generated */
	if (unlikely(shtx->hardware && priv->hwts_tx_en))
		do_tstamp = 1;

	/* make space for additional header when fcb is needed */
	if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
			(priv->vlgrp && vlan_tx_tag_present(skb)) ||
			unlikely(do_tstamp)) &&
			(skb_headroom(skb) < GMAC_FCB_LEN)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	txq->tx_bytes += skb->len;
	txq->tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
				tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |=
					BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			length = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | length |
				BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = dma_map_page(&priv->ofdev->dev,
					skb_shinfo(skb)->frags[i].page,
					skb_shinfo(skb)->frags[i].page_offset,
					length,
					DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Set up checksumming */
	if (CHECKSUM_PARTIAL == skb->ip_summed) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
		gfar_tx_checksum(skb, fcb);
	}

	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
		if (unlikely(NULL == fcb)) {
			fcb = gfar_add_fcb(skb);
			lstatus |= BD_LFLAG(TXBD_TOE);
		}

		gfar_tx_vlan(skb, fcb);
	}

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		shtx->in_progress = 1;
		if (fcb == NULL)
			fcb = gfar_add_fcb(skb);
		fcb->ptp = 1;
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	/*
	 * If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
				(skb_headlen(skb) - GMAC_FCB_LEN);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	/*
	 * We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/*
	 * The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary) */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
		TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	/* Unlock priv */
	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}

/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

/* Enables and disables VLAN insertion/extraction */
/*
 * Runs with local IRQs off and all RX queue locks held; also disables
 * the frame parser when nothing else requires it anymore, and re-applies
 * the MTU since VLAN headers change the needed buffer size.
 */
static void gfar_vlan_rx_register(struct net_device *dev,
		struct vlan_group *grp)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	priv->vlgrp = grp;

	if (grp) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;

		gfar_write(&regs->tctrl, tempval);

		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);

		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		/* If parse is no longer required, then disable parser */
		if (tempval & RCTRL_REQ_PARSER)
			tempval |= RCTRL_PRSDEP_INIT;
		else
			tempval &= ~RCTRL_PRSDEP_INIT;
		gfar_write(&regs->rctrl, tempval);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}

/*
 * Validate and apply a new MTU: recompute the RX buffer size (rounded
 * up to INCREMENTAL_BUFFER_SIZE, accounting for VLAN/FCB/padding),
 * restart the controller if it is up and the size changed, and set the
 * huge-frame/length-check bits for jumbo sizes.
 */
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if (priv->vlgrp)
		frame_size += VLAN_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Invalid MTU setting\n",
					dev->name);
		return -EINVAL;
	}

	if (gfar_uses_fcb(priv))
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize =
	    (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
	    INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}

/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
			reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}

/* TX watchdog: count the error and defer the full reset to a workqueue. */
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}

/* Interrupt Handler for Transmit complete */
/*
 * Reclaim completed TX descriptors: unmap DMA buffers, deliver HW TX
 * timestamps when in progress, recycle or free the skbs, and return
 * the freed BDs to num_txbdfree (under txlock).  Wakes the subqueue if
 * it was stopped.  Returns the number of frames cleaned.
 */
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	u32 lstatus;
	size_t buflen;
	union skb_shared_tx *shtx;

	rx_queue = priv->rx_queue[tx_queue->qindex];
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/*
		 * When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		shtx = skb_tx(skb);
		if (unlikely(shtx->in_progress))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
				(lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(shtx->in_progress)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				buflen, DMA_TO_DEVICE);

		if (unlikely(shtx->in_progress)) {
			struct skb_shared_hwtstamps shhwtstamps;
			/*
			 * NOTE(review): the (u32) cast of skb->data is not
			 * 64-bit clean; it is presumably fine on this 32-bit
			 * PowerPC target but would truncate on 64-bit.  The
			 * timestamp is read 8-byte aligned, 0x10 into the
			 * buffer — confirm against the eTSEC timer spec.
			 */
			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(&priv->ofdev->dev,
					bdp->bufPtr,
					bdp->length,
					DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		/*
		 * If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
				skb_recycle_check(skb, priv->rx_buffer_size +
					RXBUF_ALIGNMENT))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
			dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (__netif_subqueue_stopped(dev, tx_queue->qindex) &&
			tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tx_queue->qindex);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	return howmany;
}

/*
 * Schedule NAPI for this group, masking RX/TX interrupts; if NAPI is
 * already running, just ack the events so they don't re-fire.
 */
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/*
		 * Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);

}

/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

/* DMA-map a fresh RX skb's data buffer and write it into the given RxBD. */
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
		struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(&priv->ofdev->dev, skb->data,
			priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}

/*
 * Get an RX skb, preferring the recycle pool over a fresh allocation,
 * and align its data to an RXBUF_ALIGNMENT boundary (the applied offset
 * is recorded in the skb's control block so it can be undone later).
 * Returns NULL on allocation failure.
 */
struct sk_buff * gfar_new_skb(struct net_device *dev)
{
	unsigned int alignamount;
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);

	if (!skb)
		return NULL;

	alignamount = RXBUF_ALIGNMENT -
		(((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, alignamount);
	GFAR_CB(skb)->alignamount = alignamount;

	return skb;
}

/* Translate RxBD status error bits into netdev and extra statistics. */
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors
	 * matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		estats->rx_trunc++;

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			estats->rx_large++;
		else
			estats->rx_short++;
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		estats->rx_nonoctet++;
	}
	if (status & RXBD_CRCERR) {
		estats->rx_crcerr++;
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		estats->rx_overrun++;
		stats->rx_crc_errors++;
	}
}

irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}


/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL.
 */
/*
 * Strip FCB/padding, extract an RX hardware timestamp when enabled,
 * apply RX checksum offload results, and hand the frame to the stack
 * (via the VLAN path when a tag was extracted).  Always returns 0.
 */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;
	int ret;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb */
	/* Remove the padded bytes, if there are any */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		/* hardware prepends the nanosecond timestamp to the data */
		u64 *ns = (u64 *) skb->data;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (priv->rx_csum_enable)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet up the stack */
	if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
		ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
	else
		ret = netif_receive_skb(skb);

	if (NET_RX_DROP == ret)
		priv->extra_stats.kernel_dropped++;

	return 0;
}

/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 *   until the budget/quota has been reached. Returns the number
 *   of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;
		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
				priv->rx_buffer_size, DMA_FROM_DEVICE);

		/* oversized frame not flagged by HW: force the LARGE status */
		if (unlikely(!(bdp->status & RXBD_ERR) &&
				bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
				 bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb) {
				/*
				 * We need to un-reserve() the skb to what it
				 * was before gfar_new_skb() re-aligned
				 * it to an RXBUF_ALIGNMENT boundary
				 * before we put the skb back on the
				 * recycle list.
				 */
				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
				__skb_queue_head(&priv->rx_recycle, skb);
			}
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull);

			} else {
				if (netif_msg_rx_err(priv))
					printk(KERN_WARNING
					       "%s: Missing skb!\n", dev->name);
				rx_queue->stats.rx_dropped++;
				priv->extra_stats.rx_skbmissing++;
			}

		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx =
		    (rx_queue->skb_currx + 1) &
		    RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}

/*
 * NAPI poll for one interrupt group: splits the budget across this
 * group's RX queues, cleaning their paired TX queues alongside, and
 * redistributes unused budget until the queues are serviced or the
 * budget is exhausted.  Completes NAPI and re-enables interrupts when
 * the group cleaned less than its budget.
 */
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp = container_of(napi,
			struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
	int tx_cleaned = 0, i, left_over_budget = budget;
	unsigned long serviced_queues = 0;
	int num_queues = 0;

	num_queues = gfargrp->num_rx_queues;
	budget_per_queue = budget/num_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	while (num_queues && left_over_budget) {

		budget_per_queue = left_over_budget/num_queues;
		left_over_budget = 0;

		for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
			if (test_bit(i, &serviced_queues))
				continue;
			rx_queue = priv->rx_queue[i];
			tx_queue = priv->tx_queue[rx_queue->qindex];

			tx_cleaned += gfar_clean_tx_ring(tx_queue);
			rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
							budget_per_queue);
			rx_cleaned += rx_cleaned_per_queue;
			if (rx_cleaned_per_queue < budget_per_queue) {
				left_over_budget = left_over_budget +
					(budget_per_queue - rx_cleaned_per_queue);
				set_bit(i, &serviced_queues);
				num_queues--;
			}
		}
	}

	if (tx_cleaned)
		return budget;

	if (rx_cleaned < budget) {
		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer */
		/* Otherwise, clear it */
		gfar_configure_coalescing(priv,
				gfargrp->rx_bit_map, gfargrp->tx_bit_map);
	}

	return rx_cleaned;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i = 0;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			disable_irq(priv->gfargrp[i].interruptReceive);
			disable_irq(priv->gfargrp[i].interruptError);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			/* re-enable in reverse order of the disables above */
			enable_irq(priv->gfargrp[i].interruptError);
			enable_irq(priv->gfargrp[i].interruptReceive);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			disable_irq(priv->gfargrp[i].interruptTransmit);
			gfar_interrupt(priv->gfargrp[i].interruptTransmit,
						&priv->gfargrp[i]);
			enable_irq(priv->gfargrp[i].interruptTransmit);
		}
	}
}
#endif

/* The interrupt handler for devices with one interrupt */
/* Reads IEVENT once and dispatches to the rx/tx/error sub-handlers. */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	/* TX queues are locked so we don't race with the hot TX path */
	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				if (netif_msg_link(priv))
					printk(KERN_WARNING
						"%s: Ack!  Speed (%d) is not 10/100/1000!\n",
						dev->name, phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* link went down: reset the cached state */
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);
	unlock_tx_qs(priv);
	local_irq_restore(flags);
}

/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.
 * Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting them */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			/* first addresses go to exact-match slots, rest hash */
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}


/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

	/* slot 0 holds the station address; only 1..GFAR_EM_NUM are cleared */
	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * gaddr7.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(MAC_ADDR_LEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}


/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[MAC_ADDR_LEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz */
	/* little endian is silly */
	for (idx = 0; idx < MAC_ADDR_LEN; idx++)
		tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}

/* GFAR error interrupt handler */
/*
 * Handles all error-type IEVENT bits: counts TX/RX errors, reactivates
 * halted TX queues on FIFO underrun, and kicks RX cleanup on BSY.
 * Magic-packet events are explicitly not treated as errors.
 */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv= gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	/* Hmm... */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
		       dev->name, events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			if (netif_msg_tx_err(priv))
				printk(KERN_DEBUG "%s: TX FIFO underrun, "
				       "packet dropped.\n", dev->name);
			dev->stats.tx_dropped++;
			priv->extra_stats.tx_underrun++;

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_bsy++;

		gfar_receive(irq, grp_id);

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
			       dev->name, gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		priv->extra_stats.rx_babr++;

		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
	}
	if (events & IEVENT_EBERR) {
		priv->extra_stats.eberr++;
		if (netif_msg_rx_err(priv))
			printk(KERN_DEBUG "%s: bus error\n", dev->name);
	}
	if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
		printk(KERN_DEBUG "%s: control frame\n", dev->name);

	if (events & IEVENT_BABT) {
		priv->extra_stats.tx_babt++;
		if (netif_msg_tx_err(priv))
			printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
	}
	return IRQ_HANDLED;
}

static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe =
gfar_probe, .remove = gfar_remove, }; static int __init gfar_init(void) { return of_register_platform_driver(&gfar_driver); } static void __exit gfar_exit(void) { of_unregister_platform_driver(&gfar_driver); } module_init(gfar_init); module_exit(gfar_exit);
gpl-2.0
licheegh/openwrt
target/linux/adm5120/files-3.18/arch/mips/adm5120/mikrotik/rb-153.c
481
2046
/* * Mikrotik RouterBOARD 153 support * * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * */ #include "rb-1xx.h" static struct resource rb153_cf_resources[] __initdata = { { .name = "cf_membase", .start = ADM5120_EXTIO1_BASE, .end = ADM5120_EXTIO1_BASE + ADM5120_EXTIO1_SIZE-1 , .flags = IORESOURCE_MEM }, { .name = "cf_irq", .start = ADM5120_IRQ_GPIO4, .end = ADM5120_IRQ_GPIO4, .flags = IORESOURCE_IRQ } }; static struct gpio_led rb153_gpio_leds[] __initdata = { GPIO_LED_STD(ADM5120_GPIO_PIN5, "user", NULL), GPIO_LED_INV(ADM5120_GPIO_P0L1, "lan1_speed", NULL), GPIO_LED_INV(ADM5120_GPIO_P0L0, "lan1_lnkact", NULL), GPIO_LED_INV(ADM5120_GPIO_P1L1, "lan5_speed", NULL), GPIO_LED_INV(ADM5120_GPIO_P1L0, "lan5_lnkact", NULL), GPIO_LED_INV(ADM5120_GPIO_P2L1, "lan4_speed", NULL), GPIO_LED_INV(ADM5120_GPIO_P2L0, "lan4_lnkact", NULL), GPIO_LED_INV(ADM5120_GPIO_P3L1, "lan3_speed", NULL), GPIO_LED_INV(ADM5120_GPIO_P3L0, "lan3_lnkact", NULL), GPIO_LED_INV(ADM5120_GPIO_P4L1, "lan2_speed", NULL), GPIO_LED_INV(ADM5120_GPIO_P4L0, "lan2_lnkact", NULL), }; static u8 rb153_vlans[6] __initdata = { 0x7F, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void __init rb153_add_device_cf(void) { /* enable CSX1:INTX1 on GPIO[3:4] for the CF slot */ adm5120_gpio_csx1_enable(); /* enable the wait state pin GPIO[0] for external I/O control */ adm5120_gpio_ew_enable(); platform_device_register_simple("pata-rb153-cf", -1, rb153_cf_resources, ARRAY_SIZE(rb153_cf_resources)); } static void __init rb153_setup(void) { rb1xx_generic_setup(); rb1xx_add_device_nand(); rb153_add_device_cf(); adm5120_add_device_gpio_leds(ARRAY_SIZE(rb153_gpio_leds), rb153_gpio_leds); adm5120_add_device_switch(5, rb153_vlans); } MIPS_MACHINE(MACH_ADM5120_RB_153, "150", "Mikrotik RouterBOARD 153", rb153_setup);
gpl-2.0
jmaurice/android_kernel_samsung_jfltedcm_old
arch/arm/mach-msm/devices-msm7x30.c
737
29791
/* * Copyright (C) 2008 Google, Inc. * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/msm_rotator.h> #include <linux/dma-mapping.h> #include <mach/kgsl.h> #include <linux/android_pmem.h> #include <linux/regulator/machine.h> #include <linux/init.h> #include <mach/irqs.h> #include <mach/msm_iomap.h> #include <mach/dma.h> #include <mach/board.h> #include <asm/clkdev.h> #include <linux/msm_ion.h> #include "devices.h" #include "footswitch.h" #include <asm/mach/flash.h> #include <asm/mach/mmc.h> #include <mach/msm_hsusb.h> #ifdef CONFIG_PMIC8058 #include <linux/mfd/pmic8058.h> #endif #include <mach/dal_axi.h> #include <mach/msm_memtypes.h> #include "pm.h" #include "irq.h" struct platform_device msm7x30_device_acpuclk = { .name = "acpuclk-7x30", .id = -1, }; /* EBI THERMAL DRIVER */ static struct resource msm_ebi0_thermal_resources[] = { { .start = 0xA8600000, .end = 0xA86005FF, .name = "physbase", .flags = IORESOURCE_MEM } }; struct platform_device msm_ebi0_thermal = { .name = "msm_popmem-tm", .id = 0, .num_resources = 1, .resource = msm_ebi0_thermal_resources }; static struct resource msm_ebi1_thermal_resources[] = { { .start = 0xA8700000, .end = 0xA87005FF, .name = "physbase", .flags = IORESOURCE_MEM } }; struct platform_device msm_ebi1_thermal = { .name = "msm_popmem-tm", .id = 1, .num_resources = 1, .resource = msm_ebi1_thermal_resources }; static struct resource resources_adsp[] = { { .start = INT_ADSP_A9_A11, .end = 
INT_ADSP_A9_A11, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_adsp_device = { .name = "msm_adsp", .id = -1, .num_resources = ARRAY_SIZE(resources_adsp), .resource = resources_adsp, }; static struct resource resources_uart1[] = { { .start = INT_UART1, .end = INT_UART1, .flags = IORESOURCE_IRQ, }, { .start = MSM7X30_UART1_PHYS, .end = MSM7X30_UART1_PHYS + MSM7X30_UART1_SIZE - 1, .flags = IORESOURCE_MEM, }, }; static struct resource resources_uart2[] = { { .start = INT_UART2, .end = INT_UART2, .flags = IORESOURCE_IRQ, }, { .start = MSM7X30_UART2_PHYS, .end = MSM7X30_UART2_PHYS + MSM7X30_UART2_SIZE - 1, .flags = IORESOURCE_MEM, .name = "uart_resource" }, }; static struct resource resources_uart3[] = { { .start = INT_UART3, .end = INT_UART3, .flags = IORESOURCE_IRQ, }, { .start = MSM7X30_UART3_PHYS, .end = MSM7X30_UART3_PHYS + MSM7X30_UART3_SIZE - 1, .flags = IORESOURCE_MEM, }, }; struct platform_device msm_device_uart1 = { .name = "msm_serial", .id = 0, .num_resources = ARRAY_SIZE(resources_uart1), .resource = resources_uart1, }; struct platform_device msm_device_uart2 = { .name = "msm_serial", .id = 1, .num_resources = ARRAY_SIZE(resources_uart2), .resource = resources_uart2, }; struct platform_device msm_device_uart3 = { .name = "msm_serial", .id = 2, .num_resources = ARRAY_SIZE(resources_uart3), .resource = resources_uart3, }; #define MSM_UART1DM_PHYS 0xA3300000 #define MSM_UART2DM_PHYS 0xA3200000 static struct resource msm_uart1_dm_resources[] = { { .start = MSM_UART1DM_PHYS, .end = MSM_UART1DM_PHYS + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_UART1DM_IRQ, .end = INT_UART1DM_IRQ, .flags = IORESOURCE_IRQ, }, { .start = INT_UART1DM_RX, .end = INT_UART1DM_RX, .flags = IORESOURCE_IRQ, }, { .start = DMOV_HSUART1_TX_CHAN, .end = DMOV_HSUART1_RX_CHAN, .name = "uartdm_channels", .flags = IORESOURCE_DMA, }, { .start = DMOV_HSUART1_TX_CRCI, .end = DMOV_HSUART1_RX_CRCI, .name = "uartdm_crci", .flags = IORESOURCE_DMA, }, }; static u64 
msm_uart_dm1_dma_mask = DMA_BIT_MASK(32); struct platform_device msm_device_uart_dm1 = { .name = "msm_serial_hs", .id = 0, .num_resources = ARRAY_SIZE(msm_uart1_dm_resources), .resource = msm_uart1_dm_resources, .dev = { .dma_mask = &msm_uart_dm1_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; static struct resource msm_uart2_dm_resources[] = { { .start = MSM_UART2DM_PHYS, .end = MSM_UART2DM_PHYS + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_UART2DM_IRQ, .end = INT_UART2DM_IRQ, .flags = IORESOURCE_IRQ, }, { .start = INT_UART2DM_RX, .end = INT_UART2DM_RX, .flags = IORESOURCE_IRQ, }, { .start = DMOV_HSUART2_TX_CHAN, .end = DMOV_HSUART2_RX_CHAN, .name = "uartdm_channels", .flags = IORESOURCE_DMA, }, { .start = DMOV_HSUART2_TX_CRCI, .end = DMOV_HSUART2_RX_CRCI, .name = "uartdm_crci", .flags = IORESOURCE_DMA, }, }; static u64 msm_uart_dm2_dma_mask = DMA_BIT_MASK(32); struct platform_device msm_device_uart_dm2 = { .name = "msm_serial_hs", .id = 1, .num_resources = ARRAY_SIZE(msm_uart2_dm_resources), .resource = msm_uart2_dm_resources, .dev = { .dma_mask = &msm_uart_dm2_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, }; #define MSM_I2C_SIZE SZ_4K #define MSM_I2C_PHYS 0xACD00000 #define MSM_I2C_2_PHYS 0xACF00000 static struct resource resources_i2c_2[] = { { .start = MSM_I2C_2_PHYS, .end = MSM_I2C_2_PHYS + MSM_I2C_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_PWB_I2C_2, .end = INT_PWB_I2C_2, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_i2c_2 = { .name = "msm_i2c", .id = 2, .num_resources = ARRAY_SIZE(resources_i2c_2), .resource = resources_i2c_2, }; static struct resource resources_i2c[] = { { .start = MSM_I2C_PHYS, .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1, .flags = IORESOURCE_MEM, }, { .start = INT_PWB_I2C, .end = INT_PWB_I2C, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_i2c = { .name = "msm_i2c", .id = 0, .num_resources = ARRAY_SIZE(resources_i2c), .resource = resources_i2c, }; #ifdef 
CONFIG_MSM_CAMERA_V4L2 static struct resource msm_csic_resources[] = { { .name = "csic", .start = 0xA6100000, .end = 0xA6100000 + 0x00000400 - 1, .flags = IORESOURCE_MEM, }, { .name = "csic", .start = INT_CSI, .end = INT_CSI, .flags = IORESOURCE_IRQ, }, }; struct resource msm_vfe_resources[] = { { .name = "msm_vfe", .start = 0xA6000000, .end = 0xA6000000 + SZ_1M - 1, .flags = IORESOURCE_MEM, }, { .name = "msm_vfe", .start = INT_VFE, .end = INT_VFE, .flags = IORESOURCE_IRQ, }, { .name = "msm_camif", .start = 0xAB000000, .end = 0xAB000000 + SZ_1K - 1, .flags = IORESOURCE_MEM, }, }; static struct resource msm_vpe_resources[] = { { .name = "vpe", .start = 0xAD200000, .end = 0xAD200000 + SZ_1M - 1, .flags = IORESOURCE_MEM, }, { .name = "vpe", .start = INT_VPE, .end = INT_VPE, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_csic0 = { .name = "msm_csic", .id = 0, .resource = msm_csic_resources, .num_resources = ARRAY_SIZE(msm_csic_resources), }; struct platform_device msm_device_vfe = { .name = "msm_vfe", .id = 0, .resource = msm_vfe_resources, .num_resources = ARRAY_SIZE(msm_vfe_resources), }; struct platform_device msm_device_vpe = { .name = "msm_vpe", .id = 0, .resource = msm_vpe_resources, .num_resources = ARRAY_SIZE(msm_vpe_resources), }; #endif #define MSM_QUP_PHYS 0xA8301000 #define MSM_GSBI_QUP_I2C_PHYS 0xA8300000 #define MSM_QUP_SIZE SZ_4K static struct resource resources_qup[] = { { .name = "qup_phys_addr", .start = MSM_QUP_PHYS, .end = MSM_QUP_PHYS + MSM_QUP_SIZE - 1, .flags = IORESOURCE_MEM, }, { .name = "gsbi_qup_i2c_addr", .start = MSM_GSBI_QUP_I2C_PHYS, .end = MSM_GSBI_QUP_I2C_PHYS + 4 - 1, .flags = IORESOURCE_MEM, }, { .name = "qup_in_intr", .start = INT_PWB_QUP_IN, .end = INT_PWB_QUP_IN, .flags = IORESOURCE_IRQ, }, { .name = "qup_out_intr", .start = INT_PWB_QUP_OUT, .end = INT_PWB_QUP_OUT, .flags = IORESOURCE_IRQ, }, { .name = "qup_err_intr", .start = INT_PWB_QUP_ERR, .end = INT_PWB_QUP_ERR, .flags = IORESOURCE_IRQ, }, }; struct 
platform_device qup_device_i2c = { .name = "qup_i2c", .id = 4, .num_resources = ARRAY_SIZE(resources_qup), .resource = resources_qup, }; #ifdef CONFIG_MSM_SSBI #define MSM_SSBI_PMIC1_PHYS 0xAD900000 static struct resource msm_ssbi_pmic1_resources[] = { { .start = MSM_SSBI_PMIC1_PHYS, .end = MSM_SSBI_PMIC1_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; struct platform_device msm_device_ssbi_pmic1 = { .name = "msm_ssbi", .id = 0, .resource = msm_ssbi_pmic1_resources, .num_resources = ARRAY_SIZE(msm_ssbi_pmic1_resources), }; #endif #ifdef CONFIG_I2C_SSBI #define MSM_SSBI7_PHYS 0xAC800000 static struct resource msm_ssbi7_resources[] = { { .name = "ssbi_base", .start = MSM_SSBI7_PHYS, .end = MSM_SSBI7_PHYS + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; struct platform_device msm_device_ssbi7 = { .name = "i2c_ssbi", .id = 7, .num_resources = ARRAY_SIZE(msm_ssbi7_resources), .resource = msm_ssbi7_resources, }; #endif /* CONFIG_I2C_SSBI */ #define MSM_HSUSB_PHYS 0xA3600000 static struct resource resources_hsusb_otg[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; static u64 dma_mask = 0xffffffffULL; struct platform_device msm_device_hsusb_otg = { .name = "msm_hsusb_otg", .id = -1, .num_resources = ARRAY_SIZE(resources_hsusb_otg), .resource = resources_hsusb_otg, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; static struct resource resources_hsusb_peripheral[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; static struct resource resources_gadget_peripheral[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_hsusb_peripheral = { .name = "msm_hsusb_peripheral", .id = 
-1, .num_resources = ARRAY_SIZE(resources_hsusb_peripheral), .resource = resources_hsusb_peripheral, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; struct platform_device msm_device_gadget_peripheral = { .name = "msm_hsusb", .id = -1, .num_resources = ARRAY_SIZE(resources_gadget_peripheral), .resource = resources_gadget_peripheral, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; static struct resource resources_hsusb_host[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_hsusb_host = { .name = "msm_hsusb_host", .id = 0, .num_resources = ARRAY_SIZE(resources_hsusb_host), .resource = resources_hsusb_host, .dev = { .dma_mask = &dma_mask, .coherent_dma_mask = 0xffffffffULL, }, }; static struct platform_device *msm_host_devices[] = { &msm_device_hsusb_host, }; int msm_add_host(unsigned int host, struct msm_usb_host_platform_data *plat) { struct platform_device *pdev; pdev = msm_host_devices[host]; if (!pdev) return -ENODEV; pdev->dev.platform_data = plat; return platform_device_register(pdev); } struct platform_device asoc_msm_pcm = { .name = "msm-dsp-audio", .id = 0, }; struct platform_device asoc_msm_dai0 = { .name = "msm-codec-dai", .id = 0, }; struct platform_device asoc_msm_dai1 = { .name = "msm-cpu-dai", .id = 0, }; #if defined (CONFIG_SND_MSM_MVS_DAI_SOC) struct platform_device asoc_msm_mvs = { .name = "msm-mvs-audio", .id = 0, }; struct platform_device asoc_mvs_dai0 = { .name = "mvs-codec-dai", .id = 0, }; struct platform_device asoc_mvs_dai1 = { .name = "mvs-cpu-dai", .id = 0, }; #endif #define MSM_NAND_PHYS 0xA0200000 #define MSM_NANDC01_PHYS 0xA0240000 #define MSM_NANDC10_PHYS 0xA0280000 #define MSM_NANDC11_PHYS 0xA02C0000 #define EBI2_REG_BASE 0xA0000000 static struct resource resources_nand[] = { [0] = { .name = "msm_nand_dmac", .start = DMOV_NAND_CHAN, 
.end = DMOV_NAND_CHAN, .flags = IORESOURCE_DMA, }, [1] = { .name = "msm_nand_phys", .start = MSM_NAND_PHYS, .end = MSM_NAND_PHYS + 0x7FF, .flags = IORESOURCE_MEM, }, [2] = { .name = "msm_nandc01_phys", .start = MSM_NANDC01_PHYS, .end = MSM_NANDC01_PHYS + 0x7FF, .flags = IORESOURCE_MEM, }, [3] = { .name = "msm_nandc10_phys", .start = MSM_NANDC10_PHYS, .end = MSM_NANDC10_PHYS + 0x7FF, .flags = IORESOURCE_MEM, }, [4] = { .name = "msm_nandc11_phys", .start = MSM_NANDC11_PHYS, .end = MSM_NANDC11_PHYS + 0x7FF, .flags = IORESOURCE_MEM, }, [5] = { .name = "ebi2_reg_base", .start = EBI2_REG_BASE, .end = EBI2_REG_BASE + 0x60, .flags = IORESOURCE_MEM, }, }; static struct resource resources_otg[] = { { .start = MSM_HSUSB_PHYS, .end = MSM_HSUSB_PHYS + SZ_1K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_USB_HS, .end = INT_USB_HS, .flags = IORESOURCE_IRQ, }, { .name = "vbus_on", .start = PMIC8058_IRQ_BASE + PM8058_CHGVAL_IRQ, .end = PMIC8058_IRQ_BASE + PM8058_CHGVAL_IRQ, .flags = IORESOURCE_IRQ, }, }; struct platform_device msm_device_otg = { .name = "msm_otg", .id = -1, .num_resources = ARRAY_SIZE(resources_otg), .resource = resources_otg, .dev = { .coherent_dma_mask = 0xffffffffULL, }, }; struct flash_platform_data msm_nand_data = { .version = VERSION_2, }; struct platform_device msm_device_nand = { .name = "msm_nand", .id = -1, .num_resources = ARRAY_SIZE(resources_nand), .resource = resources_nand, .dev = { .platform_data = &msm_nand_data, }, }; static struct msm_pm_irq_calls msm7x30_pm_irq_calls = { .irq_pending = msm_irq_pending, .idle_sleep_allowed = msm_irq_idle_sleep_allowed, .enter_sleep1 = msm_irq_enter_sleep1, .enter_sleep2 = msm_irq_enter_sleep2, .exit_sleep1 = msm_irq_exit_sleep1, .exit_sleep2 = msm_irq_exit_sleep2, .exit_sleep3 = msm_irq_exit_sleep3, }; void __init msm_pm_register_irqs(void) { msm_pm_set_irq_extns(&msm7x30_pm_irq_calls); } static struct resource smd_resource[] = { { .name = "a9_m2a_0", .start = INT_A9_M2A_0, .flags = IORESOURCE_IRQ, }, { .name = 
"a9_m2a_5", .start = INT_A9_M2A_5, .flags = IORESOURCE_IRQ, }, { .name = "adsp_a11_smsm", .start = INT_ADSP_A11, .flags = IORESOURCE_IRQ, }, }; static struct smd_subsystem_config smd_config_list[] = { { .irq_config_id = SMD_MODEM, .subsys_name = "modem", .edge = SMD_APPS_MODEM, .smd_int.irq_name = "a9_m2a_0", .smd_int.flags = IRQF_TRIGGER_RISING, .smd_int.irq_id = -1, .smd_int.device_name = "smd_dev", .smd_int.dev_id = 0, .smd_int.out_bit_pos = 1 << 0, .smd_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE, .smd_int.out_offset = 0x8, .smsm_int.irq_name = "a9_m2a_5", .smsm_int.flags = IRQF_TRIGGER_RISING, .smsm_int.irq_id = -1, .smsm_int.device_name = "smd_dev", .smsm_int.dev_id = 0, .smsm_int.out_bit_pos = 1 << 5, .smsm_int.out_base = (void __iomem *)MSM_APCS_GCC_BASE, .smsm_int.out_offset = 0x8, } }; static struct smd_platform smd_platform_data = { .num_ss_configs = ARRAY_SIZE(smd_config_list), .smd_ss_configs = smd_config_list, }; struct platform_device msm_device_smd = { .name = "msm_smd", .id = -1, .resource = smd_resource, .num_resources = ARRAY_SIZE(smd_resource), .dev = { .platform_data = &smd_platform_data, } }; static struct resource msm_dmov_resource[] = { { .start = INT_ADM_AARM, .flags = IORESOURCE_IRQ, }, { .start = 0xAC400000, .end = 0xAC400000 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, }; static struct msm_dmov_pdata msm_dmov_pdata = { .sd = 2, .sd_size = 0x400, }; struct platform_device msm_device_dmov = { .name = "msm_dmov", .id = -1, .resource = msm_dmov_resource, .num_resources = ARRAY_SIZE(msm_dmov_resource), .dev = { .platform_data = &msm_dmov_pdata, }, }; #define MSM_SDC1_BASE 0xA0400000 #define MSM_SDC2_BASE 0xA0500000 #define MSM_SDC3_BASE 0xA3000000 #define MSM_SDC4_BASE 0xA3100000 static struct resource resources_sdc1[] = { { .name = "core_mem", .start = MSM_SDC1_BASE, .end = MSM_SDC1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC1_0, .end = INT_SDC1_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", 
.start = DMOV_SDC1_CHAN, .end = DMOV_SDC1_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC1_CRCI, .end = DMOV_SDC1_CRCI, .flags = IORESOURCE_DMA, } }; static struct resource resources_sdc2[] = { { .name = "core_mem", .start = MSM_SDC2_BASE, .end = MSM_SDC2_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC2_0, .end = INT_SDC2_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_NAND_CHAN, .end = DMOV_NAND_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC2_CRCI, .end = DMOV_SDC2_CRCI, .flags = IORESOURCE_DMA, } }; static struct resource resources_sdc3[] = { { .name = "core_mem", .start = MSM_SDC3_BASE, .end = MSM_SDC3_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC3_0, .end = INT_SDC3_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC3_CHAN, .end = DMOV_SDC3_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC3_CRCI, .end = DMOV_SDC3_CRCI, .flags = IORESOURCE_DMA, }, }; static struct resource resources_sdc4[] = { { .name = "core_mem", .start = MSM_SDC4_BASE, .end = MSM_SDC4_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .name = "core_irq", .start = INT_SDC4_0, .end = INT_SDC4_1, .flags = IORESOURCE_IRQ, }, { .name = "dma_chnl", .start = DMOV_SDC4_CHAN, .end = DMOV_SDC4_CHAN, .flags = IORESOURCE_DMA, }, { .name = "dma_crci", .start = DMOV_SDC4_CRCI, .end = DMOV_SDC4_CRCI, .flags = IORESOURCE_DMA, }, }; struct platform_device msm_device_sdc1 = { .name = "msm_sdcc", .id = 1, .num_resources = ARRAY_SIZE(resources_sdc1), .resource = resources_sdc1, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc2 = { .name = "msm_sdcc", .id = 2, .num_resources = ARRAY_SIZE(resources_sdc2), .resource = resources_sdc2, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc3 = { .name = "msm_sdcc", .id = 3, .num_resources = 
ARRAY_SIZE(resources_sdc3), .resource = resources_sdc3, .dev = { .coherent_dma_mask = 0xffffffff, }, }; struct platform_device msm_device_sdc4 = { .name = "msm_sdcc", .id = 4, .num_resources = ARRAY_SIZE(resources_sdc4), .resource = resources_sdc4, .dev = { .coherent_dma_mask = 0xffffffff, }, }; static struct platform_device *msm_sdcc_devices[] __initdata = { &msm_device_sdc1, &msm_device_sdc2, &msm_device_sdc3, &msm_device_sdc4, }; int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat) { struct platform_device *pdev; if (controller < 1 || controller > 4) return -EINVAL; pdev = msm_sdcc_devices[controller-1]; pdev->dev.platform_data = plat; return platform_device_register(pdev); } static struct resource msm_vidc_720p_resources[] = { { .start = 0xA3B00000, .end = 0xA3B00000 + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .start = INT_MFC720, .end = INT_MFC720, .flags = IORESOURCE_IRQ, }, }; struct msm_vidc_platform_data vidc_platform_data = { .memtype = ION_CAMERA_HEAP_ID, .enable_ion = 1, .disable_dmx = 0, .cont_mode_dpb_count = 8 }; struct platform_device msm_device_vidc_720p = { .name = "msm_vidc", .id = 0, .num_resources = ARRAY_SIZE(msm_vidc_720p_resources), .resource = msm_vidc_720p_resources, .dev = { .platform_data = &vidc_platform_data, }, }; #if defined(CONFIG_FB_MSM_MDP40) #define MDP_BASE 0xA3F00000 #define PMDH_BASE 0xAD600000 #define EMDH_BASE 0xAD700000 #define TVENC_BASE 0xAD400000 #else #define MDP_BASE 0xAA200000 #define PMDH_BASE 0xAA600000 #define EMDH_BASE 0xAA700000 #define TVENC_BASE 0xAA400000 #endif static struct resource msm_mdp_resources[] = { { .name = "mdp", .start = MDP_BASE, .end = MDP_BASE + 0x000F0000 - 1, .flags = IORESOURCE_MEM, }, { .start = INT_MDP, .end = INT_MDP, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_mddi_resources[] = { { .name = "pmdh", .start = PMDH_BASE, .end = PMDH_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct resource msm_mddi_ext_resources[] = { { .name = 
"emdh", .start = EMDH_BASE, .end = EMDH_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct resource msm_ebi2_lcd_resources[] = { { .name = "base", .start = 0xa0d00000, .end = 0xa0d00000 + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, }, { .name = "lcd01", .start = 0x98000000, .end = 0x98000000 + 0x80000 - 1, .flags = IORESOURCE_MEM, }, { .name = "lcd02", .start = 0x9c000000, .end = 0x9c000000 + 0x80000 - 1, .flags = IORESOURCE_MEM, }, }; static struct resource msm_tvenc_resources[] = { { .name = "tvenc", .start = TVENC_BASE, .end = TVENC_BASE + PAGE_SIZE - 1, .flags = IORESOURCE_MEM, } }; #ifdef CONFIG_FB_MSM_TVOUT static struct resource tvout_device_resources[] = { { .name = "tvout_device_irq", .start = INT_TV_ENC, .end = INT_TV_ENC, .flags = IORESOURCE_IRQ, }, }; #endif static struct platform_device msm_mdp_device = { .name = "mdp", .id = 0, .num_resources = ARRAY_SIZE(msm_mdp_resources), .resource = msm_mdp_resources, }; static struct platform_device msm_mddi_device = { .name = "mddi", .id = 0, .num_resources = ARRAY_SIZE(msm_mddi_resources), .resource = msm_mddi_resources, }; static struct platform_device msm_mddi_ext_device = { .name = "mddi_ext", .id = 0, .num_resources = ARRAY_SIZE(msm_mddi_ext_resources), .resource = msm_mddi_ext_resources, }; static struct platform_device msm_ebi2_lcd_device = { .name = "ebi2_lcd", .id = 0, .num_resources = ARRAY_SIZE(msm_ebi2_lcd_resources), .resource = msm_ebi2_lcd_resources, }; static struct platform_device msm_lcdc_device = { .name = "lcdc", .id = 0, }; static struct platform_device msm_dtv_device = { .name = "dtv", .id = 0, }; static struct platform_device msm_tvenc_device = { .name = "tvenc", .id = 0, .num_resources = ARRAY_SIZE(msm_tvenc_resources), .resource = msm_tvenc_resources, }; #ifdef CONFIG_FB_MSM_TVOUT static struct platform_device tvout_msm_device = { .name = "tvout_device", .id = 0, .num_resources = ARRAY_SIZE(tvout_device_resources), .resource = tvout_device_resources, }; #endif /* TSIF begin */ 
#if defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) #define MSM_TSIF_PHYS (0xa3400000) #define MSM_TSIF_SIZE (0x200) static struct resource tsif_resources[] = { [0] = { .flags = IORESOURCE_IRQ, .start = INT_TSIF, .end = INT_TSIF, }, [1] = { .flags = IORESOURCE_MEM, .start = MSM_TSIF_PHYS, .end = MSM_TSIF_PHYS + MSM_TSIF_SIZE - 1, }, [2] = { .flags = IORESOURCE_DMA, .start = DMOV_TSIF_CHAN, .end = DMOV_TSIF_CRCI, }, }; static void tsif_release(struct device *dev) { dev_info(dev, "release\n"); } struct platform_device msm_device_tsif = { .name = "msm_tsif", .id = 0, .num_resources = ARRAY_SIZE(tsif_resources), .resource = tsif_resources, .dev = { .release = tsif_release, }, }; #endif /* defined(CONFIG_TSIF) || defined(CONFIG_TSIF_MODULE) */ /* TSIF end */ #ifdef CONFIG_MSM_ROTATOR static struct resource resources_msm_rotator[] = { { .start = 0xA3E00000, .end = 0xA3F00000 - 1, .flags = IORESOURCE_MEM, }, { .start = INT_ROTATOR, .end = INT_ROTATOR, .flags = IORESOURCE_IRQ, }, }; static struct msm_rot_clocks rotator_clocks[] = { { .clk_name = "core_clk", .clk_type = ROTATOR_CORE_CLK, .clk_rate = 0, }, { .clk_name = "iface_clk", .clk_type = ROTATOR_PCLK, .clk_rate = 0, }, { .clk_name = "mem_clk", .clk_type = ROTATOR_IMEM_CLK, .clk_rate = 0, }, }; static struct msm_rotator_platform_data rotator_pdata = { .number_of_clocks = ARRAY_SIZE(rotator_clocks), .hardware_version_number = 0x1000303, .rotator_clks = rotator_clocks, }; struct platform_device msm_rotator_device = { .name = "msm_rotator", .id = 0, .num_resources = ARRAY_SIZE(resources_msm_rotator), .resource = resources_msm_rotator, .dev = { .platform_data = &rotator_pdata, }, }; #endif static void __init msm_register_device(struct platform_device *pdev, void *data) { int ret; pdev->dev.platform_data = data; ret = platform_device_register(pdev); if (ret) dev_err(&pdev->dev, "%s: platform_device_register() failed = %d\n", __func__, ret); } void __init msm_fb_register_device(char *name, void *data) { if 
(!strncmp(name, "mdp", 3)) msm_register_device(&msm_mdp_device, data); else if (!strncmp(name, "pmdh", 4)) msm_register_device(&msm_mddi_device, data); else if (!strncmp(name, "emdh", 4)) msm_register_device(&msm_mddi_ext_device, data); else if (!strncmp(name, "ebi2", 4)) msm_register_device(&msm_ebi2_lcd_device, data); else if (!strncmp(name, "tvenc", 5)) msm_register_device(&msm_tvenc_device, data); else if (!strncmp(name, "lcdc", 4)) msm_register_device(&msm_lcdc_device, data); else if (!strncmp(name, "dtv", 3)) msm_register_device(&msm_dtv_device, data); #ifdef CONFIG_FB_MSM_TVOUT else if (!strncmp(name, "tvout_device", 12)) msm_register_device(&tvout_msm_device, data); #endif else printk(KERN_ERR "%s: unknown device! %s\n", __func__, name); } static struct platform_device msm_camera_device = { .name = "msm_camera", .id = 0, }; void __init msm_camera_register_device(void *res, uint32_t num, void *data) { msm_camera_device.num_resources = num; msm_camera_device.resource = res; msm_register_device(&msm_camera_device, data); } struct resource kgsl_3d0_resources[] = { { .name = KGSL_3D0_REG_MEMORY, .start = 0xA3500000, /* 3D GRP address */ .end = 0xA351ffff, .flags = IORESOURCE_MEM, }, { .name = KGSL_3D0_IRQ, .start = INT_GRP_3D, .end = INT_GRP_3D, .flags = IORESOURCE_IRQ, }, }; static struct kgsl_device_platform_data kgsl_3d0_pdata = { .pwrlevel = { { .gpu_freq = 245760000, .bus_freq = 192000000, }, { .gpu_freq = 192000000, .bus_freq = 152000000, }, { .gpu_freq = 192000000, .bus_freq = 0, }, }, .init_level = 0, .num_levels = 3, .set_grp_async = set_grp3d_async, .idle_timeout = HZ/20, .nap_allowed = true, .idle_needed = true, .clk_map = KGSL_CLK_SRC | KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM, }; struct platform_device msm_kgsl_3d0 = { .name = "kgsl-3d0", .id = 0, .num_resources = ARRAY_SIZE(kgsl_3d0_resources), .resource = kgsl_3d0_resources, .dev = { .platform_data = &kgsl_3d0_pdata, }, }; static struct resource kgsl_2d0_resources[] = { { .name = 
KGSL_2D0_REG_MEMORY, .start = 0xA3900000, /* Z180 base address */ .end = 0xA3900FFF, .flags = IORESOURCE_MEM, }, { .name = KGSL_2D0_IRQ, .start = INT_GRP_2D, .end = INT_GRP_2D, .flags = IORESOURCE_IRQ, }, }; static struct kgsl_device_platform_data kgsl_2d0_pdata = { .pwrlevel = { { .gpu_freq = 0, .bus_freq = 192000000, }, }, .init_level = 0, .num_levels = 1, /* HW workaround, run Z180 SYNC @ 192 MHZ */ .set_grp_async = NULL, .idle_timeout = HZ/10, .nap_allowed = true, .idle_needed = true, .clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE, }; struct platform_device msm_kgsl_2d0 = { .name = "kgsl-2d0", .id = 0, .num_resources = ARRAY_SIZE(kgsl_2d0_resources), .resource = kgsl_2d0_resources, .dev = { .platform_data = &kgsl_2d0_pdata, }, }; struct platform_device *msm_footswitch_devices[] = { FS_PCOM(FS_GFX2D0, "vdd", "kgsl-2d0.0"), FS_PCOM(FS_GFX3D, "vdd", "kgsl-3d0.0"), FS_PCOM(FS_MDP, "vdd", "mdp.0"), FS_PCOM(FS_MFC, "fs_mfc", NULL), FS_PCOM(FS_ROT, "vdd", "msm_rotator.0"), FS_PCOM(FS_VFE, "fs_vfe", NULL), FS_PCOM(FS_VPE, "fs_vpe", NULL), }; unsigned msm_num_footswitch_devices = ARRAY_SIZE(msm_footswitch_devices); static struct resource gpio_resources[] = { { .start = INT_GPIO_GROUP1, .flags = IORESOURCE_IRQ, }, { .start = INT_GPIO_GROUP2, .flags = IORESOURCE_IRQ, }, }; static struct platform_device msm_device_gpio = { .name = "msmgpio", .id = -1, .resource = gpio_resources, .num_resources = ARRAY_SIZE(gpio_resources), }; static int __init msm7630_init_gpio(void) { platform_device_register(&msm_device_gpio); return 0; } postcore_initcall(msm7630_init_gpio);
gpl-2.0
Sajid3/linux
sound/pci/vx222/vx222.c
1249
7140
/*
 * Driver for Digigram VX222 V2/Mic PCI soundcards
 *
 * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "vx222.h"

#define CARD_NAME "VX222"

MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("Digigram VX222 V2/Mic");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}");

/* Standard ALSA per-card module parameters (up to SNDRV_CARDS instances). */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;	/* Enable this card */
static bool mic[SNDRV_CARDS];	/* microphone */
static int ibl[SNDRV_CARDS];	/* capture IBL size (0 = driver default) */

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Digigram " CARD_NAME " soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Digigram " CARD_NAME " soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard.");
module_param_array(mic, bool, NULL, 0444);
MODULE_PARM_DESC(mic, "Enable Microphone.");
module_param_array(ibl, int, NULL, 0444);
MODULE_PARM_DESC(ibl, "Capture IBL size.");

/*
 * Board variants, selected via the PCI subdevice match below.
 */
enum {
	VX_PCI_VX222_OLD,
	VX_PCI_VX222_NEW
};

/* Both variants sit behind a PLX PCI bridge (vendor 0x10b5). */
static const struct pci_device_id snd_vx222_ids[] = {
	{ 0x10b5, 0x9050, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_OLD, },   /* PLX */
	{ 0x10b5, 0x9030, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_NEW, },   /* PLX */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, snd_vx222_ids);

/*
 * Mixer dB ranges: old boards use a coarser attenuator, v2/Mic use the
 * AKM codec scale.
 */
static const DECLARE_TLV_DB_SCALE(db_scale_old_vol, -11350, 50, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_akm, -7350, 50, 0);

static struct snd_vx_hardware vx222_old_hw = {
	.name = "VX222/Old",
	.type = VX_TYPE_BOARD,
	/* hw specs */
	.num_codecs = 1,
	.num_ins = 1,
	.num_outs = 1,
	.output_level_max = VX_ANALOG_OUT_LEVEL_MAX,
	.output_level_db_scale = db_scale_old_vol,
};

static struct snd_vx_hardware vx222_v2_hw = {
	.name = "VX222/v2",
	.type = VX_TYPE_V2,
	/* hw specs */
	.num_codecs = 1,
	.num_ins = 1,
	.num_outs = 1,
	.output_level_max = VX2_AKM_LEVEL_MAX,
	.output_level_db_scale = db_scale_akm,
};

static struct snd_vx_hardware vx222_mic_hw = {
	.name = "VX222/Mic",
	.type = VX_TYPE_MIC,
	/* hw specs */
	.num_codecs = 1,
	.num_ins = 1,
	.num_outs = 1,
	.output_level_max = VX2_AKM_LEVEL_MAX,
	.output_level_db_scale = db_scale_akm,
};


/*
 * Release everything snd_vx222_create() acquired: IRQ, PCI regions,
 * PCI enable state, and the chip structure itself.  Safe to call on a
 * partially constructed chip (IRQ/regions are checked before release).
 */
static int snd_vx222_free(struct vx_core *chip)
{
	struct snd_vx222 *vx = (struct snd_vx222 *)chip;

	if (chip->irq >= 0)
		free_irq(chip->irq, (void*)chip);
	if (vx->port[0])
		pci_release_regions(vx->pci);
	pci_disable_device(vx->pci);
	kfree(chip);
	return 0;
}

/* snd_device_ops dev_free hook: forwards to snd_vx222_free(). */
static int snd_vx222_dev_free(struct snd_device *device)
{
	struct vx_core *chip = device->device_data;
	return snd_vx222_free(chip);
}


/*
 * Allocate and initialize one VX222 chip instance.
 *
 * Acquisition order matters: enable PCI device -> bus mastering ->
 * allocate core -> request regions -> request IRQ -> register as an
 * ALSA low-level device.  On any failure everything acquired so far is
 * torn down (via snd_vx222_free() once the core exists).
 *
 * On success *rchip points at the new instance and 0 is returned;
 * otherwise a negative errno is returned.
 */
static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
			    struct snd_vx_hardware *hw,
			    struct snd_vx222 **rchip)
{
	struct vx_core *chip;
	struct snd_vx222 *vx;
	int i, err;
	static struct snd_device_ops ops = {
		.dev_free = snd_vx222_dev_free,
	};
	struct snd_vx_ops *vx_ops;

	/* enable PCI device */
	if ((err = pci_enable_device(pci)) < 0)
		return err;
	pci_set_master(pci);

	/* old boards use a different low-level op table */
	vx_ops = hw->type == VX_TYPE_BOARD ? &vx222_old_ops : &vx222_ops;
	/* snd_vx_create allocates sizeof(vx_core) + extra; pass only the
	 * extra bytes needed for struct snd_vx222 */
	chip = snd_vx_create(card, hw, vx_ops,
			     sizeof(struct snd_vx222) - sizeof(struct vx_core));
	if (! chip) {
		pci_disable_device(pci);
		return -ENOMEM;
	}
	vx = (struct snd_vx222 *)chip;
	vx->pci = pci;

	if ((err = pci_request_regions(pci, CARD_NAME)) < 0) {
		snd_vx222_free(chip);
		return err;
	}
	/* BAR0 is the PLX bridge; the two chip I/O regions are BAR1/BAR2 */
	for (i = 0; i < 2; i++)
		vx->port[i] = pci_resource_start(pci, i + 1);

	if (request_threaded_irq(pci->irq, snd_vx_irq_handler,
				 snd_vx_threaded_irq_handler, IRQF_SHARED,
				 KBUILD_MODNAME, chip)) {
		dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
		snd_vx222_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_vx222_free(chip);
		return err;
	}

	*rchip = vx;
	return 0;
}


/*
 * PCI probe: create the ALSA card, pick the hardware descriptor from
 * the PCI id (and the mic[] module parameter for new boards), build the
 * chip, load firmware and register the card.
 */
static int snd_vx222_probe(struct pci_dev *pci,
			   const struct pci_device_id *pci_id)
{
	static int dev;		/* index of the next card slot to use */
	struct snd_card *card;
	struct snd_vx_hardware *hw;
	struct snd_vx222 *vx;
	int err;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
			   0, &card);
	if (err < 0)
		return err;

	switch ((int)pci_id->driver_data) {
	case VX_PCI_VX222_OLD:
		hw = &vx222_old_hw;
		break;
	case VX_PCI_VX222_NEW:
	default:
		if (mic[dev])
			hw = &vx222_mic_hw;
		else
			hw = &vx222_v2_hw;
		break;
	}
	if ((err = snd_vx222_create(card, pci, hw, &vx)) < 0) {
		snd_card_free(card);
		return err;
	}
	card->private_data = vx;
	vx->core.ibl.size = ibl[dev];

	sprintf(card->longname, "%s at 0x%lx & 0x%lx, irq %i",
		card->shortname, vx->port[0], vx->port[1], vx->core.irq);
	dev_dbg(card->dev, "%s at 0x%lx & 0x%lx, irq %i\n",
		card->shortname, vx->port[0], vx->port[1], vx->core.irq);

#ifdef SND_VX_FW_LOADER
	vx->core.dev = &pci->dev;
#endif

	if ((err = snd_vx_setup_firmware(&vx->core)) < 0) {
		snd_card_free(card);
		return err;
	}

	if ((err = snd_card_register(card)) < 0) {
		snd_card_free(card);
		return err;
	}

	pci_set_drvdata(pci, card);
	dev++;
	return 0;
}

static void snd_vx222_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
}

#ifdef CONFIG_PM_SLEEP
/* Suspend/resume simply delegate to the VX core helpers. */
static int snd_vx222_suspend(struct device *dev)
{
	struct snd_card *card = dev_get_drvdata(dev);
	struct snd_vx222 *vx = card->private_data;

	return snd_vx_suspend(&vx->core);
}

static int snd_vx222_resume(struct device *dev)
{
	struct snd_card *card = dev_get_drvdata(dev);
	struct snd_vx222 *vx = card->private_data;

	return snd_vx_resume(&vx->core);
}

static SIMPLE_DEV_PM_OPS(snd_vx222_pm, snd_vx222_suspend, snd_vx222_resume);
#define SND_VX222_PM_OPS	&snd_vx222_pm
#else
#define SND_VX222_PM_OPS	NULL
#endif

static struct pci_driver vx222_driver = {
	.name = KBUILD_MODNAME,
	.id_table = snd_vx222_ids,
	.probe = snd_vx222_probe,
	.remove = snd_vx222_remove,
	.driver = {
		.pm = SND_VX222_PM_OPS,
	},
};

module_pci_driver(vx222_driver);
gpl-2.0
hypnos-android/Hypnos
fs/proc/devices.c
1761
1434
#include <linux/fs.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> static int devinfo_show(struct seq_file *f, void *v) { int i = *(loff_t *) v; if (i < CHRDEV_MAJOR_HASH_SIZE) { if (i == 0) seq_printf(f, "Character devices:\n"); chrdev_show(f, i); } #ifdef CONFIG_BLOCK else { i -= CHRDEV_MAJOR_HASH_SIZE; if (i == 0) seq_printf(f, "\nBlock devices:\n"); blkdev_show(f, i); } #endif return 0; } static void *devinfo_start(struct seq_file *f, loff_t *pos) { if (*pos < (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return pos; return NULL; } static void *devinfo_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; if (*pos >= (BLKDEV_MAJOR_HASH_SIZE + CHRDEV_MAJOR_HASH_SIZE)) return NULL; return pos; } static void devinfo_stop(struct seq_file *f, void *v) { /* Nothing to do */ } static const struct seq_operations devinfo_ops = { .start = devinfo_start, .next = devinfo_next, .stop = devinfo_stop, .show = devinfo_show }; static int devinfo_open(struct inode *inode, struct file *filp) { return seq_open(filp, &devinfo_ops); } static const struct file_operations proc_devinfo_operations = { .open = devinfo_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_devices_init(void) { proc_create("devices", 0, NULL, &proc_devinfo_operations); return 0; } module_init(proc_devices_init);
gpl-2.0
prakhya/linux_sai
arch/mips/pmcs-msp71xx/msp_irq_per.c
2017
3023
/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file define the irq handler for MSP PER subsystem interrupts.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include <asm/mipsregs.h>

#include <msp_cic_int.h>
#include <msp_regs.h>

/*
 * Convenience Macro. Should be somewhere generic.
 */
#define get_current_vpe() \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

#ifdef CONFIG_SMP
/*
 * The PER registers must be protected from concurrent access.
 */
static DEFINE_SPINLOCK(per_lock);
#endif

/* ensure writes to per are completed */
static inline void per_wmb(void)
{
	const volatile void __iomem *per_mem = PER_INT_MSK_REG;
	volatile u32 dummy_read;

	wmb();
	/* read back the mask register to flush the posted write */
	dummy_read = __raw_readl(per_mem);
	dummy_read++;	/* suppress "set but not used" warning */
}

/*
 * Enable a PER interrupt by setting its bit in the mask register.
 * Bit position is the irq number relative to MSP_PER_INTBASE.
 * The read-modify-write is guarded by per_lock on SMP.
 */
static inline void unmask_per_irq(struct irq_data *d)
{
#ifdef CONFIG_SMP
	unsigned long flags;
	spin_lock_irqsave(&per_lock, flags);
	*PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
	spin_unlock_irqrestore(&per_lock, flags);
#else
	*PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
#endif
	per_wmb();
}

/* Disable a PER interrupt by clearing its bit in the mask register. */
static inline void mask_per_irq(struct irq_data *d)
{
#ifdef CONFIG_SMP
	unsigned long flags;
	spin_lock_irqsave(&per_lock, flags);
	*PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
	spin_unlock_irqrestore(&per_lock, flags);
#else
	*PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
#endif
	per_wmb();
}

static inline void msp_per_irq_ack(struct irq_data *d)
{
	mask_per_irq(d);
	/*
	 * In the PER interrupt controller, only bits 11 and 10
	 * are write-to-clear, (SPI TX complete, SPI RX complete).
	 * It does nothing for any others.
	 */
	*PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
}

#ifdef CONFIG_SMP
static int msp_per_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *affinity, bool force)
{
	/*
	 * NOTE(review): this ignores the requested mask and just
	 * re-enables the irq -- presumably PER irqs cannot actually
	 * be steered per-CPU on this controller; verify against the
	 * MSP71xx PER documentation.
	 */
	unmask_per_irq(d);
	return 0;
}
#endif

static struct irq_chip msp_per_irq_controller = {
	.name = "MSP_PER",
	.irq_enable = unmask_per_irq,
	.irq_disable = mask_per_irq,
	.irq_ack = msp_per_irq_ack,
#ifdef CONFIG_SMP
	.irq_set_affinity = msp_per_irq_set_affinity,
#endif
};

void __init msp_per_irq_init(void)
{
	int i;
	/* Mask/clear interrupts. */
	*PER_INT_MSK_REG = 0x00000000;
	*PER_INT_STS_REG = 0xFFFFFFFF;
	/* initialize all the IRQ descriptors */
	for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
		irq_set_chip(i, &msp_per_irq_controller);
	}
}

/*
 * Dispatch the lowest-numbered pending, unmasked PER interrupt
 * (ffs picks bit positions starting at 1, hence the -1).
 */
void msp_per_irq_dispatch(void)
{
	u32 per_mask = *PER_INT_MSK_REG;
	u32 per_status = *PER_INT_STS_REG;
	u32 pending;

	pending = per_status & per_mask;
	if (pending) {
		do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
	} else {
		spurious_interrupt();
	}
}
gpl-2.0
q-li/linux-sunxi
net/nfc/nci/ntf.c
2017
15806
/* * The NFC Controller Interface is the communication protocol between an * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2011 Texas Instruments, Inc. * * Written by Ilan Elias <ilane@ti.com> * * Acknowledgements: * This file is based on hci_event.c, which was written * by Maxim Krasnyansky. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/nfc.h> /* Handle NCI Notification packets */ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_conn_credit_ntf *ntf = (void *) skb->data; int i; pr_debug("num_entries %d\n", ntf->num_entries); if (ntf->num_entries > NCI_MAX_NUM_CONN) ntf->num_entries = NCI_MAX_NUM_CONN; /* update the credits */ for (i = 0; i < ntf->num_entries; i++) { ntf->conn_entries[i].conn_id = nci_conn_id(&ntf->conn_entries[i].conn_id); pr_debug("entry[%d]: conn_id %d, credits %d\n", i, ntf->conn_entries[i].conn_id, ntf->conn_entries[i].credits); if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) { /* found static rf connection */ atomic_add(ntf->conn_entries[i].credits, &ndev->credits_cnt); } } /* trigger the next tx */ if 
(!skb_queue_empty(&ndev->tx_q)) queue_work(ndev->tx_wq, &ndev->tx_work); } static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { /* Activation failed, so complete the request (the state remains the same) */ nci_req_complete(ndev, status); } } static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_intf_error_ntf *ntf = (void *) skb->data; ntf->conn_id = nci_conn_id(&ntf->conn_id); pr_debug("status 0x%x, conn_id %d\n", ntf->status, ntf->conn_id); /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, -EIO); } static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfca_poll *nfca_poll, __u8 *data) { nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data)); data += 2; nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE); pr_debug("sens_res 0x%x, nfcid1_len %d\n", nfca_poll->sens_res, nfca_poll->nfcid1_len); memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len); data += nfca_poll->nfcid1_len; nfca_poll->sel_res_len = *data++; if (nfca_poll->sel_res_len != 0) nfca_poll->sel_res = *data++; pr_debug("sel_res_len %d, sel_res 0x%x\n", nfca_poll->sel_res_len, nfca_poll->sel_res); return data; } static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcb_poll *nfcb_poll, __u8 *data) { nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE); pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len); memcpy(nfcb_poll->sensb_res, data, nfcb_poll->sensb_res_len); data += nfcb_poll->sensb_res_len; return data; } static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcf_poll *nfcf_poll, __u8 *data) { 
nfcf_poll->bit_rate = *data++; nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE); pr_debug("bit_rate %d, sensf_res_len %d\n", nfcf_poll->bit_rate, nfcf_poll->sensf_res_len); memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len); data += nfcf_poll->sensf_res_len; return data; } static int nci_add_new_protocol(struct nci_dev *ndev, struct nfc_target *target, __u8 rf_protocol, __u8 rf_tech_and_mode, void *params) { struct rf_tech_specific_params_nfca_poll *nfca_poll; struct rf_tech_specific_params_nfcb_poll *nfcb_poll; struct rf_tech_specific_params_nfcf_poll *nfcf_poll; __u32 protocol; if (rf_protocol == NCI_RF_PROTOCOL_T2T) protocol = NFC_PROTO_MIFARE_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) protocol = NFC_PROTO_ISO14443_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_T3T) protocol = NFC_PROTO_FELICA_MASK; else protocol = 0; if (!(protocol & ndev->poll_prots)) { pr_err("the target found does not have the desired protocol\n"); return -EPROTO; } if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) { nfca_poll = (struct rf_tech_specific_params_nfca_poll *)params; target->sens_res = nfca_poll->sens_res; target->sel_res = nfca_poll->sel_res; target->nfcid1_len = nfca_poll->nfcid1_len; if (target->nfcid1_len > 0) { memcpy(target->nfcid1, nfca_poll->nfcid1, target->nfcid1_len); } } else if (rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) { nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params; target->sensb_res_len = nfcb_poll->sensb_res_len; if (target->sensb_res_len > 0) { memcpy(target->sensb_res, nfcb_poll->sensb_res, target->sensb_res_len); } } else if (rf_tech_and_mode == NCI_NFC_F_PASSIVE_POLL_MODE) { nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params; target->sensf_res_len = nfcf_poll->sensf_res_len; if (target->sensf_res_len > 0) { memcpy(target->sensf_res, nfcf_poll->sensf_res, target->sensf_res_len); } } else { pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode); return -EPROTO; } 
target->supported_protocols |= protocol; pr_debug("protocol 0x%x\n", protocol); return 0; } static void nci_add_new_target(struct nci_dev *ndev, struct nci_rf_discover_ntf *ntf) { struct nfc_target *target; int i, rc; for (i = 0; i < ndev->n_targets; i++) { target = &ndev->targets[i]; if (target->idx == ntf->rf_discovery_id) { /* This target already exists, add the new protocol */ nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); return; } } /* This is a new target, check if we've enough room */ if (ndev->n_targets == NCI_MAX_DISCOVERED_TARGETS) { pr_debug("not enough room, ignoring new target...\n"); return; } target = &ndev->targets[ndev->n_targets]; rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); if (!rc) { target->idx = ntf->rf_discovery_id; ndev->n_targets++; pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets); } } void nci_clear_target_list(struct nci_dev *ndev) { memset(ndev->targets, 0, (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS)); ndev->n_targets = 0; } static void nci_rf_discover_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_rf_discover_ntf ntf; __u8 *data = skb->data; bool add_target = true; ntf.rf_discovery_id = *data++; ntf.rf_protocol = *data++; ntf.rf_tech_and_mode = *data++; ntf.rf_tech_specific_params_len = *data++; pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id); pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol); pr_debug("rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode); pr_debug("rf_tech_specific_params_len %d\n", ntf.rf_tech_specific_params_len); if (ntf.rf_tech_specific_params_len > 0) { switch (ntf.rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfca_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfca_poll), data); break; case NCI_NFC_B_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcb_passive_poll(ndev, 
&(ntf.rf_tech_specific_params.nfcb_poll), data); break; case NCI_NFC_F_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcf_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; default: pr_err("unsupported rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode); data += ntf.rf_tech_specific_params_len; add_target = false; } } ntf.ntf_type = *data++; pr_debug("ntf_type %d\n", ntf.ntf_type); if (add_target == true) nci_add_new_target(ndev, &ntf); if (ntf.ntf_type == NCI_DISCOVER_NTF_TYPE_MORE) { atomic_set(&ndev->state, NCI_W4_ALL_DISCOVERIES); } else { atomic_set(&ndev->state, NCI_W4_HOST_SELECT); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } } static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev, struct nci_rf_intf_activated_ntf *ntf, __u8 *data) { struct activation_params_nfca_poll_iso_dep *nfca_poll; struct activation_params_nfcb_poll_iso_dep *nfcb_poll; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; nfca_poll->rats_res_len = min_t(__u8, *data++, 20); pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len); if (nfca_poll->rats_res_len > 0) { memcpy(nfca_poll->rats_res, data, nfca_poll->rats_res_len); } break; case NCI_NFC_B_PASSIVE_POLL_MODE: nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep; nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50); pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len); if (nfcb_poll->attrib_res_len > 0) { memcpy(nfcb_poll->attrib_res, data, nfcb_poll->attrib_res_len); } break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); return NCI_STATUS_RF_PROTOCOL_ERROR; } return NCI_STATUS_OK; } static void nci_target_auto_activated(struct nci_dev *ndev, struct nci_rf_intf_activated_ntf *ntf) { struct nfc_target *target; int rc; target = &ndev->targets[ndev->n_targets]; rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, 
ntf->activation_rf_tech_and_mode, &ntf->rf_tech_specific_params); if (rc) return; target->idx = ntf->rf_discovery_id; ndev->n_targets++; pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_rf_intf_activated_ntf ntf; __u8 *data = skb->data; int err = NCI_STATUS_OK; ntf.rf_discovery_id = *data++; ntf.rf_interface = *data++; ntf.rf_protocol = *data++; ntf.activation_rf_tech_and_mode = *data++; ntf.max_data_pkt_payload_size = *data++; ntf.initial_num_credits = *data++; ntf.rf_tech_specific_params_len = *data++; pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id); pr_debug("rf_interface 0x%x\n", ntf.rf_interface); pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol); pr_debug("activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); pr_debug("max_data_pkt_payload_size 0x%x\n", ntf.max_data_pkt_payload_size); pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits); pr_debug("rf_tech_specific_params_len %d\n", ntf.rf_tech_specific_params_len); if (ntf.rf_tech_specific_params_len > 0) { switch (ntf.activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfca_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfca_poll), data); break; case NCI_NFC_B_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcb_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcb_poll), data); break; case NCI_NFC_F_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcf_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); err = NCI_STATUS_RF_PROTOCOL_ERROR; goto exit; } } ntf.data_exch_rf_tech_and_mode = *data++; ntf.data_exch_tx_bit_rate = *data++; ntf.data_exch_rx_bit_rate = *data++; ntf.activation_params_len = *data++; 
pr_debug("data_exch_rf_tech_and_mode 0x%x\n", ntf.data_exch_rf_tech_and_mode); pr_debug("data_exch_tx_bit_rate 0x%x\n", ntf.data_exch_tx_bit_rate); pr_debug("data_exch_rx_bit_rate 0x%x\n", ntf.data_exch_rx_bit_rate); pr_debug("activation_params_len %d\n", ntf.activation_params_len); if (ntf.activation_params_len > 0) { switch (ntf.rf_interface) { case NCI_RF_INTERFACE_ISO_DEP: err = nci_extract_activation_params_iso_dep(ndev, &ntf, data); break; case NCI_RF_INTERFACE_FRAME: /* no activation params */ break; default: pr_err("unsupported rf_interface 0x%x\n", ntf.rf_interface); err = NCI_STATUS_RF_PROTOCOL_ERROR; break; } } exit: if (err == NCI_STATUS_OK) { ndev->max_data_pkt_payload_size = ntf.max_data_pkt_payload_size; ndev->initial_num_credits = ntf.initial_num_credits; /* set the available credits to initial value */ atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); } if (atomic_read(&ndev->state) == NCI_DISCOVERY) { /* A single target was found and activated automatically */ atomic_set(&ndev->state, NCI_POLL_ACTIVE); if (err == NCI_STATUS_OK) nci_target_auto_activated(ndev, &ntf); } else { /* ndev->state == NCI_W4_HOST_SELECT */ /* A selected target was activated, so complete the request */ atomic_set(&ndev->state, NCI_POLL_ACTIVE); nci_req_complete(ndev, err); } } static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_rf_deactivate_ntf *ntf = (void *) skb->data; pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason); /* drop tx data queue */ skb_queue_purge(&ndev->tx_q); /* drop partial rx data packet */ if (ndev->rx_data_reassembly) { kfree_skb(ndev->rx_data_reassembly); ndev->rx_data_reassembly = 0; } /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, -EIO); nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_IDLE); nci_req_complete(ndev, NCI_STATUS_OK); } void nci_ntf_packet(struct nci_dev *ndev, 
struct sk_buff *skb) { __u16 ntf_opcode = nci_opcode(skb->data); pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", nci_pbf(skb->data), nci_opcode_gid(ntf_opcode), nci_opcode_oid(ntf_opcode), nci_plen(skb->data)); /* strip the nci control header */ skb_pull(skb, NCI_CTRL_HDR_SIZE); switch (ntf_opcode) { case NCI_OP_CORE_CONN_CREDITS_NTF: nci_core_conn_credits_ntf_packet(ndev, skb); break; case NCI_OP_CORE_GENERIC_ERROR_NTF: nci_core_generic_error_ntf_packet(ndev, skb); break; case NCI_OP_CORE_INTF_ERROR_NTF: nci_core_conn_intf_error_ntf_packet(ndev, skb); break; case NCI_OP_RF_DISCOVER_NTF: nci_rf_discover_ntf_packet(ndev, skb); break; case NCI_OP_RF_INTF_ACTIVATED_NTF: nci_rf_intf_activated_ntf_packet(ndev, skb); break; case NCI_OP_RF_DEACTIVATE_NTF: nci_rf_deactivate_ntf_packet(ndev, skb); break; default: pr_err("unknown ntf opcode 0x%x\n", ntf_opcode); break; } kfree_skb(skb); }
gpl-2.0
VigorCM9/vigor_aosp_kernel
security/tomoyo/load_policy.c
2529
2230
/*
 * security/tomoyo/load_policy.c
 *
 * Policy loader launcher for TOMOYO.
 *
 * Copyright (C) 2005-2010  NTT DATA CORPORATION
 */

#include "common.h"

/* path to policy loader */
static const char *tomoyo_loader = "/sbin/tomoyo-init";

/**
 * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists.
 *
 * Returns true if /sbin/tomoyo-init exists, false otherwise.
 */
static bool tomoyo_policy_loader_exists(void)
{
	/*
	 * Don't activate MAC if the policy loader doesn't exist.
	 * If the initrd includes /sbin/init but real-root-dev has not
	 * mounted on / yet, activating MAC will block the system since
	 * policies are not loaded yet.
	 * Thus, let do_execve() call this function every time.
	 */
	struct path path;

	if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) {
		printk(KERN_INFO "Not activating Mandatory Access Control now "
		       "since %s doesn't exist.\n", tomoyo_loader);
		return false;
	}
	path_put(&path);
	return true;
}

/**
 * tomoyo_load_policy - Run external policy loader to load policy.
 *
 * @filename: The program about to start.
 *
 * This function checks whether @filename is /sbin/init , and if so
 * invoke /sbin/tomoyo-init and wait for the termination of /sbin/tomoyo-init
 * and then continues invocation of /sbin/init.
 * /sbin/tomoyo-init reads policy files in /etc/tomoyo/ directory and
 * writes to /sys/kernel/security/tomoyo/ interfaces.
 *
 * Returns nothing.
 */
void tomoyo_load_policy(const char *filename)
{
	char *argv[2];
	char *envp[3];

	/* load the policy only once per boot */
	if (tomoyo_policy_loaded)
		return;
	/*
	 * Check filename is /sbin/init or /sbin/tomoyo-start.
	 * /sbin/tomoyo-start is a dummy filename in case where /sbin/init can't
	 * be passed.
	 * You can create /sbin/tomoyo-start by
	 * "ln -s /bin/true /sbin/tomoyo-start".
	 */
	if (strcmp(filename, "/sbin/init") &&
	    strcmp(filename, "/sbin/tomoyo-start"))
		return;
	if (!tomoyo_policy_loader_exists())
		return;
	printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
	       tomoyo_loader);
	argv[0] = (char *) tomoyo_loader;
	argv[1] = NULL;
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;
	/*
	 * NOTE(review): the literal 1 here presumably means "wait for the
	 * helper process to finish" (UMH_WAIT_PROC in later kernels) --
	 * verify against this tree's call_usermodehelper() definition.
	 */
	call_usermodehelper(argv[0], argv, envp, 1);
	tomoyo_check_profile();
}
gpl-2.0