repo_name
string
path
string
copies
string
size
string
content
string
license
string
nmenon/ti-linux-kernel-nm
fs/cifs/smb2misc.c
196
17505
/* * fs/cifs/smb2misc.c * * Copyright (C) International Business Machines Corp., 2002,2011 * Etersoft, 2012 * Author(s): Steve French (sfrench@us.ibm.com) * Pavel Shilovsky (pshilovsky@samba.org) 2012 * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/ctype.h> #include "smb2pdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "smb2proto.h" #include "cifs_debug.h" #include "cifs_unicode.h" #include "smb2status.h" static int check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid) { /* * Make sure that this really is an SMB, that it is a response, * and that the message ids match. */ if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) && (mid == hdr->MessageId)) { if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR) return 0; else { /* only one valid case where server sends us request */ if (hdr->Command == SMB2_OPLOCK_BREAK) return 0; else cERROR(1, "Received Request not response"); } } else { /* bad signature or mid */ if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER) cERROR(1, "Bad protocol string signature header %x", *(unsigned int *) hdr->ProtocolId); if (mid != hdr->MessageId) cERROR(1, "Mids do not match: %llu and %llu", mid, hdr->MessageId); } cERROR(1, "Bad SMB detected. 
The Mid=%llu", hdr->MessageId); return 1; } /* * The following table defines the expected "StructureSize" of SMB2 responses * in order by SMB2 command. This is similar to "wct" in SMB/CIFS responses. * * Note that commands are defined in smb2pdu.h in le16 but the array below is * indexed by command in host byte order */ static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ __constant_cpu_to_le16(65), /* SMB2_SESSION_SETUP */ __constant_cpu_to_le16(9), /* SMB2_LOGOFF */ __constant_cpu_to_le16(4), /* SMB2_TREE_CONNECT */ __constant_cpu_to_le16(16), /* SMB2_TREE_DISCONNECT */ __constant_cpu_to_le16(4), /* SMB2_CREATE */ __constant_cpu_to_le16(89), /* SMB2_CLOSE */ __constant_cpu_to_le16(60), /* SMB2_FLUSH */ __constant_cpu_to_le16(4), /* SMB2_READ */ __constant_cpu_to_le16(17), /* SMB2_WRITE */ __constant_cpu_to_le16(17), /* SMB2_LOCK */ __constant_cpu_to_le16(4), /* SMB2_IOCTL */ __constant_cpu_to_le16(49), /* BB CHECK this ... not listed in documentation */ /* SMB2_CANCEL */ __constant_cpu_to_le16(0), /* SMB2_ECHO */ __constant_cpu_to_le16(4), /* SMB2_QUERY_DIRECTORY */ __constant_cpu_to_le16(9), /* SMB2_CHANGE_NOTIFY */ __constant_cpu_to_le16(9), /* SMB2_QUERY_INFO */ __constant_cpu_to_le16(9), /* SMB2_SET_INFO */ __constant_cpu_to_le16(2), /* BB FIXME can also be 44 for lease break */ /* SMB2_OPLOCK_BREAK */ __constant_cpu_to_le16(24) }; int smb2_check_message(char *buf, unsigned int length) { struct smb2_hdr *hdr = (struct smb2_hdr *)buf; struct smb2_pdu *pdu = (struct smb2_pdu *)hdr; __u64 mid = hdr->MessageId; __u32 len = get_rfc1002_length(buf); __u32 clc_len; /* calculated length */ int command; /* BB disable following printk later */ cFYI(1, "%s length: 0x%x, smb_buf_length: 0x%x", __func__, length, len); /* * Add function to do table lookup of StructureSize by command * ie Validate the wct via smb2_struct_sizes table above */ if (length < sizeof(struct smb2_pdu)) { if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status 
!= 0)) { pdu->StructureSize2 = 0; /* * As with SMB/CIFS, on some error cases servers may * not return wct properly */ return 0; } else { cERROR(1, "Length less than SMB header size"); } return 1; } if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) { cERROR(1, "SMB length greater than maximum, mid=%llu", mid); return 1; } if (check_smb2_hdr(hdr, mid)) return 1; if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { cERROR(1, "Illegal structure size %u", le16_to_cpu(hdr->StructureSize)); return 1; } command = le16_to_cpu(hdr->Command); if (command >= NUMBER_OF_SMB2_COMMANDS) { cERROR(1, "Illegal SMB2 command %d", command); return 1; } if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) { if (command != SMB2_OPLOCK_BREAK_HE && (hdr->Status == 0 || pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) { /* error packets have 9 byte structure size */ cERROR(1, "Illegal response size %u for command %d", le16_to_cpu(pdu->StructureSize2), command); return 1; } else if (command == SMB2_OPLOCK_BREAK_HE && (hdr->Status == 0) && (le16_to_cpu(pdu->StructureSize2) != 44) && (le16_to_cpu(pdu->StructureSize2) != 36)) { /* special case for SMB2.1 lease break message */ cERROR(1, "Illegal response size %d for oplock break", le16_to_cpu(pdu->StructureSize2)); return 1; } } if (4 + len != length) { cERROR(1, "Total length %u RFC1002 length %u mismatch mid %llu", length, 4 + len, mid); return 1; } clc_len = smb2_calc_size(hdr); if (4 + len != clc_len) { cFYI(1, "Calculated size %u length %u mismatch mid %llu", clc_len, 4 + len, mid); /* Windows 7 server returns 24 bytes more */ if (clc_len + 20 == len && command == SMB2_OPLOCK_BREAK_HE) return 0; /* server can return one byte more */ if (clc_len == 4 + len + 1) return 0; return 1; } return 0; } /* * The size of the variable area depends on the offset and length fields * located in different fields for various SMB2 responses. SMB2 responses * with no variable length info, show an offset of zero for the offset field. 
*/ static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = { /* SMB2_NEGOTIATE */ true, /* SMB2_SESSION_SETUP */ true, /* SMB2_LOGOFF */ false, /* SMB2_TREE_CONNECT */ false, /* SMB2_TREE_DISCONNECT */ false, /* SMB2_CREATE */ true, /* SMB2_CLOSE */ false, /* SMB2_FLUSH */ false, /* SMB2_READ */ true, /* SMB2_WRITE */ false, /* SMB2_LOCK */ false, /* SMB2_IOCTL */ true, /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */ /* SMB2_ECHO */ false, /* SMB2_QUERY_DIRECTORY */ true, /* SMB2_CHANGE_NOTIFY */ true, /* SMB2_QUERY_INFO */ true, /* SMB2_SET_INFO */ false, /* SMB2_OPLOCK_BREAK */ false }; /* * Returns the pointer to the beginning of the data area. Length of the data * area and the offset to it (from the beginning of the smb are also returned. */ char * smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr) { *off = 0; *len = 0; /* error responses do not have data area */ if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED && (((struct smb2_err_rsp *)hdr)->StructureSize) == SMB2_ERROR_STRUCTURE_SIZE2) return NULL; /* * Following commands have data areas so we have to get the location * of the data buffer offset and data buffer length for the particular * command. 
*/ switch (hdr->Command) { case SMB2_NEGOTIATE: *off = le16_to_cpu( ((struct smb2_negotiate_rsp *)hdr)->SecurityBufferOffset); *len = le16_to_cpu( ((struct smb2_negotiate_rsp *)hdr)->SecurityBufferLength); break; case SMB2_SESSION_SETUP: *off = le16_to_cpu( ((struct smb2_sess_setup_rsp *)hdr)->SecurityBufferOffset); *len = le16_to_cpu( ((struct smb2_sess_setup_rsp *)hdr)->SecurityBufferLength); break; case SMB2_CREATE: *off = le32_to_cpu( ((struct smb2_create_rsp *)hdr)->CreateContextsOffset); *len = le32_to_cpu( ((struct smb2_create_rsp *)hdr)->CreateContextsLength); break; case SMB2_QUERY_INFO: *off = le16_to_cpu( ((struct smb2_query_info_rsp *)hdr)->OutputBufferOffset); *len = le32_to_cpu( ((struct smb2_query_info_rsp *)hdr)->OutputBufferLength); break; case SMB2_READ: *off = ((struct smb2_read_rsp *)hdr)->DataOffset; *len = le32_to_cpu(((struct smb2_read_rsp *)hdr)->DataLength); break; case SMB2_QUERY_DIRECTORY: *off = le16_to_cpu( ((struct smb2_query_directory_rsp *)hdr)->OutputBufferOffset); *len = le32_to_cpu( ((struct smb2_query_directory_rsp *)hdr)->OutputBufferLength); break; case SMB2_IOCTL: case SMB2_CHANGE_NOTIFY: default: /* BB FIXME for unimplemented cases above */ cERROR(1, "no length check for command"); break; } /* * Invalid length or offset probably means data area is invalid, but * we have little choice but to ignore the data area in this case. 
*/ if (*off > 4096) { cERROR(1, "offset %d too large, data area ignored", *off); *len = 0; *off = 0; } else if (*off < 0) { cERROR(1, "negative offset %d to data invalid ignore data area", *off); *off = 0; *len = 0; } else if (*len < 0) { cERROR(1, "negative data length %d invalid, data area ignored", *len); *len = 0; } else if (*len > 128 * 1024) { cERROR(1, "data area larger than 128K: %d", *len); *len = 0; } /* return pointer to beginning of data area, ie offset from SMB start */ if ((*off != 0) && (*len != 0)) return hdr->ProtocolId + *off; else return NULL; } /* * Calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message. */ unsigned int smb2_calc_size(void *buf) { struct smb2_hdr *hdr = (struct smb2_hdr *)buf; struct smb2_pdu *pdu = (struct smb2_pdu *)hdr; int offset; /* the offset from the beginning of SMB to data area */ int data_length; /* the length of the variable length data area */ /* Structure Size has already been checked to make sure it is 64 */ int len = 4 + le16_to_cpu(pdu->hdr.StructureSize); /* * StructureSize2, ie length of fixed parameter area has already * been checked to make sure it is the correct length. */ len += le16_to_cpu(pdu->StructureSize2); if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false) goto calc_size_exit; smb2_get_data_area_len(&offset, &data_length, hdr); cFYI(1, "SMB2 data length %d offset %d", data_length, offset); if (data_length > 0) { /* * Check to make sure that data area begins after fixed area, * Note that last byte of the fixed area is part of data area * for some commands, typically those with odd StructureSize, * so we must add one to the calculation (and 4 to account for * the size of the RFC1001 hdr. 
*/ if (offset + 4 + 1 < len) { cERROR(1, "data area offset %d overlaps SMB2 header %d", offset + 4 + 1, len); data_length = 0; } else { len = 4 + offset + data_length; } } calc_size_exit: cFYI(1, "SMB2 len %d", len); return len; } /* Note: caller must free return buffer */ __le16 * cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb) { int len; const char *start_of_path; __le16 *to; /* Windows doesn't allow paths beginning with \ */ if (from[0] == '\\') start_of_path = from + 1; else start_of_path = from; to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); return to; } __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode) { if (cinode->clientCanCacheAll) return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING; else if (cinode->clientCanCacheRead) return SMB2_LEASE_READ_CACHING; return 0; } __u8 smb2_map_lease_to_oplock(__le32 lease_state) { if (lease_state & SMB2_LEASE_WRITE_CACHING) { if (lease_state & SMB2_LEASE_HANDLE_CACHING) return SMB2_OPLOCK_LEVEL_BATCH; else return SMB2_OPLOCK_LEVEL_EXCLUSIVE; } else if (lease_state & SMB2_LEASE_READ_CACHING) return SMB2_OPLOCK_LEVEL_II; return 0; } struct smb2_lease_break_work { struct work_struct lease_break; struct tcon_link *tlink; __u8 lease_key[16]; __le32 lease_state; }; static void cifs_ses_oplock_break(struct work_struct *work) { struct smb2_lease_break_work *lw = container_of(work, struct smb2_lease_break_work, lease_break); int rc; rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key, lw->lease_state); cFYI(1, "Lease release rc %d", rc); cifs_put_tlink(lw->tlink); kfree(lw); } static bool smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server) { struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer; struct list_head *tmp, *tmp1, *tmp2; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsInodeInfo *cinode; struct cifsFileInfo *cfile; struct cifs_pending_open *open; 
struct smb2_lease_break_work *lw; bool found; int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); if (!lw) { cERROR(1, "Memory allocation failed during lease break check"); return false; } INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); lw->lease_state = rsp->NewLeaseState; cFYI(1, "Checking for lease break"); /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &server->smb_ses_list) { ses = list_entry(tmp, struct cifs_ses, smb_ses_list); spin_lock(&cifs_file_list_lock); list_for_each(tmp1, &ses->tcon_list) { tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); list_for_each(tmp2, &tcon->openFileList) { cfile = list_entry(tmp2, struct cifsFileInfo, tlist); cinode = CIFS_I(cfile->dentry->d_inode); if (memcmp(cinode->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; cFYI(1, "found in the open list"); cFYI(1, "lease key match, lease break 0x%d", le32_to_cpu(rsp->NewLeaseState)); smb2_set_oplock_level(cinode, smb2_map_lease_to_oplock(rsp->NewLeaseState)); if (ack_req) cfile->oplock_break_cancelled = false; else cfile->oplock_break_cancelled = true; queue_work(cifsiod_wq, &cfile->oplock_break); spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } found = false; list_for_each_entry(open, &tcon->pending_opens, olist) { if (memcmp(open->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; if (!found && ack_req) { found = true; memcpy(lw->lease_key, open->lease_key, SMB2_LEASE_KEY_SIZE); lw->tlink = cifs_get_tlink(open->tlink); queue_work(cifsiod_wq, &lw->lease_break); } cFYI(1, "found in the pending open list"); cFYI(1, "lease key match, lease break 0x%d", le32_to_cpu(rsp->NewLeaseState)); open->oplock = smb2_map_lease_to_oplock(rsp->NewLeaseState); } if (found) { spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_tcp_ses_lock); return 
true; } } spin_unlock(&cifs_file_list_lock); } spin_unlock(&cifs_tcp_ses_lock); kfree(lw); cFYI(1, "Can not process lease break - no lease matched"); return false; } bool smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) { struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer; struct list_head *tmp, *tmp1, *tmp2; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsInodeInfo *cinode; struct cifsFileInfo *cfile; cFYI(1, "Checking for oplock break"); if (rsp->hdr.Command != SMB2_OPLOCK_BREAK) return false; if (rsp->StructureSize != smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { if (le16_to_cpu(rsp->StructureSize) == 44) return smb2_is_valid_lease_break(buffer, server); else return false; } cFYI(1, "oplock level 0x%d", rsp->OplockLevel); /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &server->smb_ses_list) { ses = list_entry(tmp, struct cifs_ses, smb_ses_list); list_for_each(tmp1, &ses->tcon_list) { tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); spin_lock(&cifs_file_list_lock); list_for_each(tmp2, &tcon->openFileList) { cfile = list_entry(tmp2, struct cifsFileInfo, tlist); if (rsp->PersistentFid != cfile->fid.persistent_fid || rsp->VolatileFid != cfile->fid.volatile_fid) continue; cFYI(1, "file id match, oplock break"); cinode = CIFS_I(cfile->dentry->d_inode); if (!cinode->clientCanCacheAll && rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE) cfile->oplock_break_cancelled = true; else cfile->oplock_break_cancelled = false; smb2_set_oplock_level(cinode, rsp->OplockLevel ? 
SMB2_OPLOCK_LEVEL_II : 0); queue_work(cifsiod_wq, &cfile->oplock_break); spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_tcp_ses_lock); cFYI(1, "No matching file for oplock break"); return true; } } spin_unlock(&cifs_tcp_ses_lock); cFYI(1, "Can not process oplock break for non-existent connection"); return false; }
gpl-2.0
alskjstl/linux
arch/s390/crypto/sha1_s390.c
452
2861
/* * Cryptographic API. * * s390 implementation of the SHA1 Secure Hash Algorithm. * * Derived from cryptoapi implementation, adapted for in-place * scatterlist interface. Originally based on the public domain * implementation written by Steve Reid. * * s390 Version: * Copyright IBM Corp. 2003, 2007 * Author(s): Thomas Spatzier * Jan Glauber (jan.glauber@de.ibm.com) * * Derived from "crypto/sha1_generic.c" * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/cpufeature.h> #include <crypto/sha.h> #include "crypt_s390.h" #include "sha.h" static int sha1_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA1_H0; sctx->state[1] = SHA1_H1; sctx->state[2] = SHA1_H2; sctx->state[3] = SHA1_H3; sctx->state[4] = SHA1_H4; sctx->count = 0; sctx->func = KIMD_SHA_1; return 0; } static int sha1_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); struct sha1_state *octx = out; octx->count = sctx->count; memcpy(octx->state, sctx->state, sizeof(octx->state)); memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer)); return 0; } static int sha1_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha1_state *ictx = in; sctx->count = ictx->count; memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); sctx->func = KIMD_SHA_1; return 0; } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_init, .update = s390_sha_update, .final 
= s390_sha_final, .export = sha1_export, .import = sha1_import, .descsize = sizeof(struct s390_sha_ctx), .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name= "sha1-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init sha1_s390_init(void) { if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA)) return -EOPNOTSUPP; return crypto_register_shash(&alg); } static void __exit sha1_s390_fini(void) { crypto_unregister_shash(&alg); } module_cpu_feature_match(MSA, sha1_s390_init); module_exit(sha1_s390_fini); MODULE_ALIAS_CRYPTO("sha1"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
gpl-2.0
quadcores/cbs_4.2.4
drivers/net/wireless/rtlwifi/pci.c
708
68830
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "wifi.h" #include "core.h" #include "pci.h" #include "base.h" #include "ps.h" #include "efuse.h" #include <linux/interrupt.h> #include <linux/export.h> #include <linux/kmemleak.h> #include <linux/module.h> MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCI basic driver for rtlwifi"); static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { INTEL_VENDOR_ID, ATI_VENDOR_ID, AMD_VENDOR_ID, SIS_VENDOR_ID }; static const u8 ac_to_hwq[] = { VO_QUEUE, VI_QUEUE, BE_QUEUE, BK_QUEUE }; static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); __le16 fc = rtl_get_fc(skb); u8 queue_index = skb_get_queue_mapping(skb); if (unlikely(ieee80211_is_beacon(fc))) return BEACON_QUEUE; if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) return MGNT_QUEUE; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) if (ieee80211_is_nullfunc(fc)) return HIGH_QUEUE; return ac_to_hwq[queue_index]; } /* Update PCI dependent default settings*/ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; u8 init_aspm; ppsc->reg_rfps_level = 0; ppsc->support_aspm = false; /*Update PCI ASPM setting */ ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm; switch (rtlpci->const_pci_aspm) { case 0: /*No ASPM */ break; case 1: /*ASPM dynamically enabled/disable. */ ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM; break; case 2: /*ASPM with Clock Req dynamically enabled/disable. 
*/ ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM | RT_RF_OFF_LEVL_CLK_REQ); break; case 3: /* * Always enable ASPM and Clock Req * from initialization to halt. * */ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM); ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM | RT_RF_OFF_LEVL_CLK_REQ); break; case 4: /* * Always enable ASPM without Clock Req * from initialization to halt. * */ ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM | RT_RF_OFF_LEVL_CLK_REQ); ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM; break; } ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC; /*Update Radio OFF setting */ switch (rtlpci->const_hwsw_rfoff_d3) { case 1: if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM) ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM; break; case 2: if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM) ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM; ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC; break; case 3: ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3; break; } /*Set HW definition to determine if it supports ASPM. */ switch (rtlpci->const_support_pciaspm) { case 0:{ /*Not support ASPM. */ bool support_aspm = false; ppsc->support_aspm = support_aspm; break; } case 1:{ /*Support ASPM. */ bool support_aspm = true; bool support_backdoor = true; ppsc->support_aspm = support_aspm; /*if (priv->oem_id == RT_CID_TOSHIBA && !priv->ndis_adapter.amd_l1_patch) support_backdoor = false; */ ppsc->support_backdoor = support_backdoor; break; } case 2: /*ASPM value set by chipset. 
*/ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) { bool support_aspm = true; ppsc->support_aspm = support_aspm; } break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case not processed\n"); break; } /* toshiba aspm issue, toshiba will set aspm selfly * so we should not set aspm in driver */ pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm); if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE && init_aspm == 0x43) ppsc->support_aspm = false; } static bool _rtl_pci_platform_switch_device_pci_aspm( struct ieee80211_hw *hw, u8 value) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) value |= 0x40; pci_write_config_byte(rtlpci->pdev, 0x80, value); return false; } /*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/ static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); pci_write_config_byte(rtlpci->pdev, 0x81, value); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) udelay(100); } /*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; u8 num4bytes = pcipriv->ndis_adapter.num4bytes; /*Retrieve original configuration settings. */ u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg; u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter. 
pcibridge_linkctrlreg; u16 aspmlevel = 0; u8 tmp_u1b = 0; if (!ppsc->support_aspm) return; if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) { RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "PCI(Bridge) UNKNOWN\n"); return; } if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) { RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ); _rtl_pci_switch_clk_req(hw, 0x0); } /*for promising device will in L0 state after an I/O. */ pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b); /*Set corresponding value. */ aspmlevel |= BIT(0) | BIT(1); linkctrl_reg &= ~aspmlevel; pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1)); _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg); udelay(50); /*4 Disable Pci Bridge ASPM */ pci_write_config_byte(rtlpci->pdev, (num4bytes << 2), pcibridge_linkctrlreg); udelay(50); } /* *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for *power saving We should follow the sequence to enable *RTL8192SE first then enable Pci Bridge ASPM *or the system will show bluescreen. */ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; u8 num4bytes = pcipriv->ndis_adapter.num4bytes; u16 aspmlevel; u8 u_pcibridge_aspmsetting; u8 u_device_aspmsetting; if (!ppsc->support_aspm) return; if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) { RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "PCI(Bridge) UNKNOWN\n"); return; } /*4 Enable Pci Bridge ASPM */ u_pcibridge_aspmsetting = pcipriv->ndis_adapter.pcibridge_linkctrlreg | rtlpci->const_hostpci_aspm_setting; if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) u_pcibridge_aspmsetting &= ~BIT(0); pci_write_config_byte(rtlpci->pdev, (num4bytes << 2), u_pcibridge_aspmsetting); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PlatformEnableASPM(): Write reg[%x] = %x\n", 
(pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10), u_pcibridge_aspmsetting); udelay(50); /*Get ASPM level (with/without Clock Req) */ aspmlevel = rtlpci->const_devicepci_aspm_setting; u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg; /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/ /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */ u_device_aspmsetting |= aspmlevel; _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) { _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0); RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ); } udelay(100); } static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); bool status = false; u8 offset_e0; unsigned offset_e4; pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0); pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0); if (offset_e0 == 0xA0) { pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4); if (offset_e4 & BIT(23)) status = true; } return status; } static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, struct rtl_priv **buddy_priv) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); bool find_buddy_priv = false; struct rtl_priv *tpriv = NULL; struct rtl_pci_priv *tpcipriv = NULL; if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) { list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list, list) { if (tpriv) { tpcipriv = (struct rtl_pci_priv *)tpriv->priv; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "pcipriv->ndis_adapter.funcnumber %x\n", pcipriv->ndis_adapter.funcnumber); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "tpcipriv->ndis_adapter.funcnumber %x\n", tpcipriv->ndis_adapter.funcnumber); if ((pcipriv->ndis_adapter.busnumber == tpcipriv->ndis_adapter.busnumber) && (pcipriv->ndis_adapter.devnumber == tpcipriv->ndis_adapter.devnumber) && (pcipriv->ndis_adapter.funcnumber != tpcipriv->ndis_adapter.funcnumber)) { 
find_buddy_priv = true; break; } } } } RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "find_buddy_priv %d\n", find_buddy_priv); if (find_buddy_priv) *buddy_priv = tpriv; return find_buddy_priv; } static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset; u8 linkctrl_reg; u8 num4bbytes; num4bbytes = (capabilityoffset + 0x10) / 4; /*Read Link Control Register */ pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg); pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg; } static void rtl_pci_parse_configuration(struct pci_dev *pdev, struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); u8 tmp; u16 linkctrl_reg; /*Link Control Register */ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg); pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n", pcipriv->ndis_adapter.linkctrl_reg); pci_read_config_byte(pdev, 0x98, &tmp); tmp |= BIT(4); pci_write_config_byte(pdev, 0x98, tmp); tmp = 0x17; pci_write_config_byte(pdev, 0x70f, tmp); } static void rtl_pci_init_aspm(struct ieee80211_hw *hw) { struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); _rtl_pci_update_default_setting(hw); if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) { /*Always enable ASPM & Clock Req. 
*/ rtl_pci_enable_aspm(hw); RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM); } } static void _rtl_pci_io_handler_init(struct device *dev, struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->io.dev = dev; rtlpriv->io.write8_async = pci_write8_async; rtlpriv->io.write16_async = pci_write16_async; rtlpriv->io.write32_async = pci_write32_async; rtlpriv->io.read8_sync = pci_read8_sync; rtlpriv->io.read16_sync = pci_read16_sync; rtlpriv->io.read32_sync = pci_read32_sync; } static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw, struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct sk_buff *next_skb; u8 additionlen = FCS_LEN; /* here open is 4, wep/tkip is 8, aes is 12*/ if (info->control.hw_key) additionlen += info->control.hw_key->icv_len; /* The most skb num is 6 */ tcb_desc->empkt_num = 0; spin_lock_bh(&rtlpriv->locks.waitq_lock); skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) { struct ieee80211_tx_info *next_info; next_info = IEEE80211_SKB_CB(next_skb); if (next_info->flags & IEEE80211_TX_CTL_AMPDU) { tcb_desc->empkt_len[tcb_desc->empkt_num] = next_skb->len + additionlen; tcb_desc->empkt_num++; } else { break; } if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid], next_skb)) break; if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num) break; } spin_unlock_bh(&rtlpriv->locks.waitq_lock); return true; } /* just for early mode now */ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct sk_buff *skb = NULL; struct ieee80211_tx_info *info = NULL; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); int tid; if (!rtlpriv->rtlhal.earlymode_enable) return; if (rtlpriv->dm.supp_phymode_switch && 
(rtlpriv->easy_concurrent_ctl.switch_in_process || (rtlpriv->buddy_priv && rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process))) return; /* we juse use em for BE/BK/VI/VO */ for (tid = 7; tid >= 0; tid--) { u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)]; struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; while (!mac->act_scanning && rtlpriv->psc.rfpwr_state == ERFON) { struct rtl_tcb_desc tcb_desc; memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); spin_lock_bh(&rtlpriv->locks.waitq_lock); if (!skb_queue_empty(&mac->skb_waitq[tid]) && (ring->entries - skb_queue_len(&ring->queue) > rtlhal->max_earlymode_num)) { skb = skb_dequeue(&mac->skb_waitq[tid]); } else { spin_unlock_bh(&rtlpriv->locks.waitq_lock); break; } spin_unlock_bh(&rtlpriv->locks.waitq_lock); /* Some macaddr can't do early mode. like * multicast/broadcast/no_qos data */ info = IEEE80211_SKB_CB(skb); if (info->flags & IEEE80211_TX_CTL_AMPDU) _rtl_update_earlymode_info(hw, skb, &tcb_desc, tid); rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); } } } static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; while (skb_queue_len(&ring->queue)) { struct sk_buff *skb; struct ieee80211_tx_info *info; __le16 fc; u8 tid; u8 *entry; if (rtlpriv->use_new_trx_flow) entry = (u8 *)(&ring->buffer_desc[ring->idx]); else entry = (u8 *)(&ring->desc[ring->idx]); if (rtlpriv->cfg->ops->get_available_desc && rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) { RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG, "no available desc!\n"); return; } if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) return; ring->idx = (ring->idx + 1) % ring->entries; skb = __skb_dequeue(&ring->queue); pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops-> get_desc((u8 *)entry, true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); /* remove early mode 
header */ if (rtlpriv->rtlhal.earlymode_enable) skb_pull(skb, EM_HDR_LEN); RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE, "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n", ring->idx, skb_queue_len(&ring->queue), *(u16 *)(skb->data + 22)); if (prio == TXCMD_QUEUE) { dev_kfree_skb(skb); goto tx_status_ok; } /* for sw LPS, just after NULL skb send out, we can * sure AP knows we are sleeping, we should not let * rf sleep */ fc = rtl_get_fc(skb); if (ieee80211_is_nullfunc(fc)) { if (ieee80211_has_pm(fc)) { rtlpriv->mac80211.offchan_delay = true; rtlpriv->psc.state_inap = true; } else { rtlpriv->psc.state_inap = false; } } if (ieee80211_is_action(fc)) { struct ieee80211_mgmt *action_frame = (struct ieee80211_mgmt *)skb->data; if (action_frame->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) { dev_kfree_skb(skb); goto tx_status_ok; } } /* update tid tx pkt num */ tid = rtl_get_tid(skb); if (tid <= 7) rtlpriv->link_info.tidtx_inperiod[tid]++; info = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(info); info->flags |= IEEE80211_TX_STAT_ACK; /*info->status.rates[0].count = 1; */ ieee80211_tx_status_irqsafe(hw, skb); if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) { RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n", prio, ring->idx, skb_queue_len(&ring->queue)); ieee80211_wake_queue(hw, skb_get_queue_mapping (skb)); } tx_status_ok: skb = NULL; } if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { rtlpriv->enter_ps = false; schedule_work(&rtlpriv->works.lps_change_work); } } static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, struct sk_buff *new_skb, u8 *entry, int rxring_idx, int desc_idx) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u32 bufferaddress; u8 tmp_one = 1; struct sk_buff *skb; if (likely(new_skb)) { skb = new_skb; goto remap; } 
skb = dev_alloc_skb(rtlpci->rxbuffersize); if (!skb) return 0; remap: /* just set skb->cb to mapping addr for pci_unmap_single use */ *((dma_addr_t *)skb->cb) = pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); bufferaddress = *((dma_addr_t *)skb->cb); if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) return 0; rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; if (rtlpriv->use_new_trx_flow) { rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RX_PREPARE, (u8 *)&bufferaddress); } else { rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXBUFF_ADDR, (u8 *)&bufferaddress); rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXPKT_LEN, (u8 *)&rtlpci->rxbuffersize); rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXOWN, (u8 *)&tmp_one); } return 1; } /* inorder to receive 8K AMSDU we have set skb to * 9100bytes in init rx ring, but if this packet is * not a AMSDU, this large packet will be sent to * TCP/IP directly, this cause big packet ping fail * like: "ping -s 65507", so here we will realloc skb * based on the true size of packet, Mac80211 * Probably will do it better, but does not yet. * * Some platform will fail when alloc skb sometimes. 
* in this condition, we will send the old skb to
 * mac80211 directly, this will not cause any other
 * issues, but only this packet will be lost by TCP/IP
 */
static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
				    struct sk_buff *skb,
				    struct ieee80211_rx_status rx_status)
{
	if (unlikely(!rtl_action_proc(hw, skb, false))) {
		/* frame rejected by the action-frame handler: drop it */
		dev_kfree_skb_any(skb);
	} else {
		struct sk_buff *uskb = NULL;
		u8 *pdata;

		/* copy into a right-sized skb (see comment above);
		 * on allocation failure the oversized original is
		 * handed to mac80211 as-is
		 */
		uskb = dev_alloc_skb(skb->len + 128);
		if (likely(uskb)) {
			memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
			       sizeof(rx_status));
			pdata = (u8 *)skb_put(uskb, skb->len);
			memcpy(pdata, skb->data, skb->len);
			dev_kfree_skb_any(skb);
			ieee80211_rx_irqsafe(hw, uskb);
		} else {
			ieee80211_rx_irqsafe(hw, skb);
		}
	}
}

/*hsisr interrupt handler*/
static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	/* write back HSISR with the handled bits set to acknowledge them */
	rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
		       rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
		       rtlpci->sys_irq_mask);
}

/*
 * Drain the RX MPDU ring: for each filled descriptor, unmap the DMA
 * buffer, query the RX status, and deliver the frame to mac80211 (or
 * drop it on CRC/HW error), then re-arm the descriptor with a fresh
 * buffer.  Runs in interrupt context.
 */
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
	struct ieee80211_rx_status rx_status = { 0 };
	unsigned int count = rtlpci->rxringcount;
	u8 own;
	u8 tmp_one;
	bool unicast = false;
	u8 hw_queue = 0;
	unsigned int rx_remained_cnt;
	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};

	/*RX NORMAL PKT */
	while (count--) {
		struct ieee80211_hdr *hdr;
		__le16 fc;
		u16 len;
		/*rx buffer descriptor */
		struct rtl_rx_buffer_desc *buffer_desc = NULL;
		/*if use new trx flow, it means wifi info */
		struct rtl_rx_desc *pdesc = NULL;
		/*rx pkt */
		struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
				rtlpci->rx_ring[rxring_idx].idx];
		struct sk_buff *new_skb;

		if (rtlpriv->use_new_trx_flow) {
			/* new flow: hardware reports how many buffers hold data */
			rx_remained_cnt =
				rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
								      hw_queue);
			if (rx_remained_cnt == 0)
				return;
		} else {	/* rx descriptor */
			pdesc =
&rtlpci->rx_ring[rxring_idx].desc[ rtlpci->rx_ring[rxring_idx].idx]; own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false, HW_DESC_OWN); if (own) /* wait data to be filled by hardware */ return; } /* Reaching this point means: data is filled already * AAAAAAttention !!! * We can NOT access 'skb' before 'pci_unmap_single' */ pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); /* get a new skb - if fail, old one will be reused */ new_skb = dev_alloc_skb(rtlpci->rxbuffersize); if (unlikely(!new_skb)) goto no_new; if (rtlpriv->use_new_trx_flow) { buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc [rtlpci->rx_ring[rxring_idx].idx]; /*means rx wifi info*/ pdesc = (struct rtl_rx_desc *)skb->data; } memset(&rx_status , 0 , sizeof(rx_status)); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, (u8 *)pdesc, skb); if (rtlpriv->use_new_trx_flow) rtlpriv->cfg->ops->rx_check_dma_ok(hw, (u8 *)buffer_desc, hw_queue); len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false, HW_DESC_RXPKT_LEN); if (skb->end - skb->tail > len) { skb_put(skb, len); if (rtlpriv->use_new_trx_flow) skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift + 24); else skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift); } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "skb->end - skb->tail = %d, len is %d\n", skb->end - skb->tail, len); dev_kfree_skb_any(skb); goto new_trx_end; } /* handle command packet here */ if (rtlpriv->cfg->ops->rx_command_packet && rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { dev_kfree_skb_any(skb); goto new_trx_end; } /* * NOTICE This can not be use for mac80211, * this is done in mac80211 code, * if done here sec DHCP will fail * skb_trim(skb, skb->len - 4); */ hdr = rtl_get_hdr(skb); fc = rtl_get_fc(skb); if (!stats.crc && !stats.hwerror) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); if (is_broadcast_ether_addr(hdr->addr1)) { ;/*TODO*/ } else if 
(is_multicast_ether_addr(hdr->addr1)) { ;/*TODO*/ } else { unicast = true; rtlpriv->stats.rxbytesunicast += skb->len; } rtl_is_special_data(hw, skb, false, true); if (ieee80211_is_data(fc)) { rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); if (unicast) rtlpriv->link_info.num_rx_inperiod++; } /* static bcn for roaming */ rtl_beacon_statistic(hw, skb); rtl_p2p_info(hw, (void *)skb->data, skb->len); /* for sw lps */ rtl_swlps_beacon(hw, (void *)skb->data, skb->len); rtl_recognize_peer(hw, (void *)skb->data, skb->len); if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) && (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) && (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc))) { dev_kfree_skb_any(skb); } else { _rtl_pci_rx_to_mac80211(hw, skb, rx_status); } } else { dev_kfree_skb_any(skb); } new_trx_end: if (rtlpriv->use_new_trx_flow) { rtlpci->rx_ring[hw_queue].next_rx_rp += 1; rtlpci->rx_ring[hw_queue].next_rx_rp %= RTL_PCI_MAX_RX_COUNT; rx_remained_cnt--; rtl_write_word(rtlpriv, 0x3B4, rtlpci->rx_ring[hw_queue].next_rx_rp); } if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { rtlpriv->enter_ps = false; schedule_work(&rtlpriv->works.lps_change_work); } skb = new_skb; no_new: if (rtlpriv->use_new_trx_flow) { _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, rxring_idx, rtlpci->rx_ring[rxring_idx].idx); } else { _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, rxring_idx, rtlpci->rx_ring[rxring_idx].idx); if (rtlpci->rx_ring[rxring_idx].idx == rtlpci->rxringcount - 1) rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, HW_DESC_RXERO, (u8 *)&tmp_one); } rtlpci->rx_ring[rxring_idx].idx = (rtlpci->rx_ring[rxring_idx].idx + 1) % rtlpci->rxringcount; } } static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) { struct ieee80211_hw *hw = dev_id; struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = 
rtl_hal(rtl_priv(hw)); unsigned long flags; u32 inta = 0; u32 intb = 0; irqreturn_t ret = IRQ_HANDLED; if (rtlpci->irq_enabled == 0) return ret; spin_lock_irqsave(&rtlpriv->locks.irq_th_lock , flags); rtlpriv->cfg->ops->disable_interrupt(hw); /*read ISR: 4/8bytes */ rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb); /*Shared IRQ or HW disappared */ if (!inta || inta == 0xffff) goto done; /*<1> beacon related */ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon ok interrupt!\n"); } if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon err interrupt!\n"); } if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n"); } if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "prepare beacon for interrupt!\n"); tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet); } /*<2> Tx related */ if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW])) RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n"); if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Manage ok interrupt!\n"); _rtl_pci_tx_isr(hw, MGNT_QUEUE); } if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "HIGH_QUEUE ok interrupt!\n"); _rtl_pci_tx_isr(hw, HIGH_QUEUE); } if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "BK Tx OK interrupt!\n"); _rtl_pci_tx_isr(hw, BK_QUEUE); } if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "BE TX OK interrupt!\n"); _rtl_pci_tx_isr(hw, BE_QUEUE); } if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "VI TX OK interrupt!\n"); _rtl_pci_tx_isr(hw, VI_QUEUE); } if (inta & 
rtlpriv->cfg->maps[RTL_IMR_VODOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Vo TX OK interrupt!\n"); _rtl_pci_tx_isr(hw, VO_QUEUE); } if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) { if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) { rtlpriv->link_info.num_tx_inperiod++; RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "CMD TX OK interrupt!\n"); _rtl_pci_tx_isr(hw, TXCMD_QUEUE); } } /*<3> Rx related */ if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n"); _rtl_pci_rx_interrupt(hw); } if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx descriptor unavailable!\n"); _rtl_pci_rx_interrupt(hw); } if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n"); _rtl_pci_rx_interrupt(hw); } /*<4> fw related*/ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) { if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "firmware interrupt!\n"); queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.fwevt_wq, 0); } } /*<5> hsisr related*/ /* Only 8188EE & 8723BE Supported. 
* If Other ICs Come in, System will corrupt, * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR] * are not initialized */ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE || rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) { RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "hsisr interrupt!\n"); _rtl_pci_hs_interrupt(hw); } } if (rtlpriv->rtlhal.earlymode_enable) tasklet_schedule(&rtlpriv->works.irq_tasklet); done: rtlpriv->cfg->ops->enable_interrupt(hw); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); return ret; } static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) { _rtl_pci_tx_chk_waitq(hw); } static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl8192_tx_ring *ring = NULL; struct ieee80211_hdr *hdr = NULL; struct ieee80211_tx_info *info = NULL; struct sk_buff *pskb = NULL; struct rtl_tx_desc *pdesc = NULL; struct rtl_tcb_desc tcb_desc; /*This is for new trx flow*/ struct rtl_tx_buffer_desc *pbuffer_desc = NULL; u8 temp_one = 1; u8 *entry; memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); ring = &rtlpci->tx_ring[BEACON_QUEUE]; pskb = __skb_dequeue(&ring->queue); if (rtlpriv->use_new_trx_flow) entry = (u8 *)(&ring->buffer_desc[ring->idx]); else entry = (u8 *)(&ring->desc[ring->idx]); if (pskb) { pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc( (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), pskb->len, PCI_DMA_TODEVICE); kfree_skb(pskb); } /*NB: the beacon data buffer must be 32-bit aligned. 
*/ pskb = ieee80211_beacon_get(hw, mac->vif); if (pskb == NULL) return; hdr = rtl_get_hdr(pskb); info = IEEE80211_SKB_CB(pskb); pdesc = &ring->desc[0]; if (rtlpriv->use_new_trx_flow) pbuffer_desc = &ring->buffer_desc[0]; rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, (u8 *)pbuffer_desc, info, NULL, pskb, BEACON_QUEUE, &tcb_desc); __skb_queue_tail(&ring->queue, pskb); if (rtlpriv->use_new_trx_flow) { temp_one = 4; rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true, HW_DESC_OWN, (u8 *)&temp_one); } else { rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, &temp_one); } return; } static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 i; u16 desc_num; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) desc_num = TX_DESC_NUM_92E; else desc_num = RT_TXDESC_NUM; for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) rtlpci->txringcount[i] = desc_num; /* *we just alloc 2 desc for beacon queue, *because we just need first desc in hw beacon. */ rtlpci->txringcount[BEACON_QUEUE] = 2; /*BE queue need more descriptor for performance *consideration or, No more tx desc will happen, *and may cause mac80211 mem leakage. 
*/
	if (!rtl_priv(hw)->use_new_trx_flow)
		rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;

	/* 9100-byte RX buffers so an 8K A-MSDU fits in one buffer */
	rtlpci->rxbuffersize = 9100;	/*2048/1024; */
	rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT;	/*64; */
}

/*
 * One-time software setup at probe: record the pci_dev, size the
 * TX/RX rings, seed the MAC defaults (beacon interval, AMPDU
 * parameters, ACM method) and register the tasklets/work items used
 * by the interrupt path.  No hardware access happens here.
 */
static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
				 struct pci_dev *pdev)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	rtlpci->up_first_time = true;
	rtlpci->being_init_adapter = false;

	rtlhal->hw = hw;
	rtlpci->pdev = pdev;

	/*Tx/Rx related var */
	_rtl_pci_init_trx_var(hw);

	/*IBSS*/
	mac->beacon_interval = 100;

	/*AMPDU*/
	mac->min_space_cfg = 0;
	mac->max_mss_density = 0;
	/*set sane AMPDU defaults */
	mac->current_ampdu_density = 7;
	mac->current_ampdu_factor = 3;

	/*QOS*/
	rtlpci->acm_method = EACMWAY2_SW;

	/*task */
	tasklet_init(&rtlpriv->works.irq_tasklet,
		     (void (*)(unsigned long))_rtl_pci_irq_tasklet,
		     (unsigned long)hw);
	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
		     (unsigned long)hw);
	INIT_WORK(&rtlpriv->works.lps_change_work,
		  rtl_lps_change_work_callback);
}

/*
 * Allocate and initialise one TX descriptor ring (both the legacy
 * descriptor array and, for the new TRX flow, the buffer-descriptor
 * array).  Returns 0 or -ENOMEM.
 */
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
				 unsigned int prio, unsigned int entries)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_tx_buffer_desc *buffer_desc;
	struct rtl_tx_desc *desc;
	dma_addr_t buffer_desc_dma, desc_dma;
	u32 nextdescaddress;
	int i;

	/* alloc tx buffer desc for new trx flow*/
	if (rtlpriv->use_new_trx_flow) {
		buffer_desc =
			pci_zalloc_consistent(rtlpci->pdev,
					      sizeof(*buffer_desc) * entries,
					      &buffer_desc_dma);

		/* hardware requires the ring to be 256-byte aligned —
		 * presumably why the low address bits are checked here
		 */
		if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Cannot allocate TX ring (prio = %d)\n",
				 prio);
			return -ENOMEM;
		}

		rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
		rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;

		rtlpci->tx_ring[prio].cur_tx_rp = 0;
rtlpci->tx_ring[prio].cur_tx_wp = 0; rtlpci->tx_ring[prio].avl_desc = entries; } /* alloc dma for this ring */ desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*desc) * entries, &desc_dma); if (!desc || (unsigned long)desc & 0xFF) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Cannot allocate TX ring (prio = %d)\n", prio); return -ENOMEM; } rtlpci->tx_ring[prio].desc = desc; rtlpci->tx_ring[prio].dma = desc_dma; rtlpci->tx_ring[prio].idx = 0; rtlpci->tx_ring[prio].entries = entries; skb_queue_head_init(&rtlpci->tx_ring[prio].queue); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n", prio, desc); /* init every desc in this ring */ if (!rtlpriv->use_new_trx_flow) { for (i = 0; i < entries; i++) { nextdescaddress = (u32)desc_dma + ((i + 1) % entries) * sizeof(*desc); rtlpriv->cfg->ops->set_desc(hw, (u8 *)&desc[i], true, HW_DESC_TX_NEXTDESC_ADDR, (u8 *)&nextdescaddress); } } return 0; } static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); int i; if (rtlpriv->use_new_trx_flow) { struct rtl_rx_buffer_desc *entry = NULL; /* alloc dma for this ring */ rtlpci->rx_ring[rxring_idx].buffer_desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*rtlpci->rx_ring[rxring_idx]. 
buffer_desc) * rtlpci->rxringcount, &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].buffer_desc || (ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Cannot allocate RX ring\n"); return -ENOMEM; } /* init every desc in this ring */ rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, rxring_idx, i)) return -ENOMEM; } } else { struct rtl_rx_desc *entry = NULL; u8 tmp_one = 1; /* alloc dma for this ring */ rtlpci->rx_ring[rxring_idx].desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*rtlpci->rx_ring[rxring_idx]. desc) * rtlpci->rxringcount, &rtlpci->rx_ring[rxring_idx].dma); if (!rtlpci->rx_ring[rxring_idx].desc || (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Cannot allocate RX ring\n"); return -ENOMEM; } /* init every desc in this ring */ rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { entry = &rtlpci->rx_ring[rxring_idx].desc[i]; if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, rxring_idx, i)) return -ENOMEM; } rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXERO, &tmp_one); } return 0; } static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, unsigned int prio) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; /* free every desc in this ring */ while (skb_queue_len(&ring->queue)) { u8 *entry; struct sk_buff *skb = __skb_dequeue(&ring->queue); if (rtlpriv->use_new_trx_flow) entry = (u8 *)(&ring->buffer_desc[ring->idx]); else entry = (u8 *)(&ring->desc[ring->idx]); pci_unmap_single(rtlpci->pdev, rtlpriv->cfg-> ops->get_desc((u8 *)entry, true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } /* free dma 
of this ring */ pci_free_consistent(rtlpci->pdev, sizeof(*ring->desc) * ring->entries, ring->desc, ring->dma); ring->desc = NULL; if (rtlpriv->use_new_trx_flow) { pci_free_consistent(rtlpci->pdev, sizeof(*ring->buffer_desc) * ring->entries, ring->buffer_desc, ring->buffer_desc_dma); ring->buffer_desc = NULL; } } static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); int i; /* free every desc in this ring */ for (i = 0; i < rtlpci->rxringcount; i++) { struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i]; if (!skb) continue; pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); kfree_skb(skb); } /* free dma of this ring */ if (rtlpriv->use_new_trx_flow) { pci_free_consistent(rtlpci->pdev, sizeof(*rtlpci->rx_ring[rxring_idx]. buffer_desc) * rtlpci->rxringcount, rtlpci->rx_ring[rxring_idx].buffer_desc, rtlpci->rx_ring[rxring_idx].dma); rtlpci->rx_ring[rxring_idx].buffer_desc = NULL; } else { pci_free_consistent(rtlpci->pdev, sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rtlpci->rxringcount, rtlpci->rx_ring[rxring_idx].desc, rtlpci->rx_ring[rxring_idx].dma); rtlpci->rx_ring[rxring_idx].desc = NULL; } } static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); int ret; int i, rxring_idx; /* rxring_idx 0:RX_MPDU_QUEUE * rxring_idx 1:RX_CMD_QUEUE */ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) { ret = _rtl_pci_init_rx_ring(hw, rxring_idx); if (ret) return ret; } for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]); if (ret) goto err_free_rings; } return 0; err_free_rings: for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) _rtl_pci_free_rx_ring(hw, rxring_idx); for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) if (rtlpci->tx_ring[i].desc || 
rtlpci->tx_ring[i].buffer_desc) _rtl_pci_free_tx_ring(hw, i); return 1; } static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw) { u32 i, rxring_idx; /*free rx rings */ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) _rtl_pci_free_rx_ring(hw, rxring_idx); /*free tx rings */ for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) _rtl_pci_free_tx_ring(hw, i); return 0; } int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); int i, rxring_idx; unsigned long flags; u8 tmp_one = 1; u32 bufferaddress; /* rxring_idx 0:RX_MPDU_QUEUE */ /* rxring_idx 1:RX_CMD_QUEUE */ for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) { /* force the rx_ring[RX_MPDU_QUEUE/ * RX_CMD_QUEUE].idx to the first one *new trx flow, do nothing */ if (!rtlpriv->use_new_trx_flow && rtlpci->rx_ring[rxring_idx].desc) { struct rtl_rx_desc *entry = NULL; rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { entry = &rtlpci->rx_ring[rxring_idx].desc[i]; bufferaddress = rtlpriv->cfg->ops->get_desc((u8 *)entry, false , HW_DESC_RXBUFF_ADDR); memset((u8 *)entry , 0 , sizeof(*rtlpci->rx_ring [rxring_idx].desc));/*clear one entry*/ if (rtlpriv->use_new_trx_flow) { rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RX_PREPARE, (u8 *)&bufferaddress); } else { rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXBUFF_ADDR, (u8 *)&bufferaddress); rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXPKT_LEN, (u8 *)&rtlpci->rxbuffersize); rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXOWN, (u8 *)&tmp_one); } } rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RXERO, (u8 *)&tmp_one); } rtlpci->rx_ring[rxring_idx].idx = 0; } /* *after reset, release previous pending packet, *and force the tx idx to the first one */ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); for (i = 0; i < 
RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { if (rtlpci->tx_ring[i].desc || rtlpci->tx_ring[i].buffer_desc) { struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i]; while (skb_queue_len(&ring->queue)) { u8 *entry; struct sk_buff *skb = __skb_dequeue(&ring->queue); if (rtlpriv->use_new_trx_flow) entry = (u8 *)(&ring->buffer_desc [ring->idx]); else entry = (u8 *)(&ring->desc[ring->idx]); pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops-> get_desc((u8 *) entry, true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } ring->idx = 0; } } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); return 0; } static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_sta_info *sta_entry = NULL; u8 tid = rtl_get_tid(skb); __le16 fc = rtl_get_fc(skb); if (!sta) return false; sta_entry = (struct rtl_sta_info *)sta->drv_priv; if (!rtlpriv->rtlhal.earlymode_enable) return false; if (ieee80211_is_nullfunc(fc)) return false; if (ieee80211_is_qos_nullfunc(fc)) return false; if (ieee80211_is_pspoll(fc)) return false; if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL) return false; if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE) return false; if (tid > 7) return false; /* maybe every tid should be checked */ if (!rtlpriv->link_info.higher_busytxtraffic[tid]) return false; spin_lock_bh(&rtlpriv->locks.waitq_lock); skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb); spin_unlock_bh(&rtlpriv->locks.waitq_lock); return true; } static int rtl_pci_tx(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, struct rtl_tcb_desc *ptcb_desc) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_sta_info *sta_entry = NULL; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct rtl8192_tx_ring *ring; struct rtl_tx_desc *pdesc; struct rtl_tx_buffer_desc *ptx_bd_desc = NULL; u16 idx; u8 hw_queue = 
_rtl_mac_to_hwqueue(hw, skb); unsigned long flags; struct ieee80211_hdr *hdr = rtl_get_hdr(skb); __le16 fc = rtl_get_fc(skb); u8 *pda_addr = hdr->addr1; struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); /*ssn */ u8 tid = 0; u16 seq_number = 0; u8 own; u8 temp_one = 1; if (ieee80211_is_mgmt(fc)) rtl_tx_mgmt_proc(hw, skb); if (rtlpriv->psc.sw_ps_enabled) { if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) && !ieee80211_has_pm(fc)) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); } rtl_action_proc(hw, skb, true); if (is_multicast_ether_addr(pda_addr)) rtlpriv->stats.txbytesmulticast += skb->len; else if (is_broadcast_ether_addr(pda_addr)) rtlpriv->stats.txbytesbroadcast += skb->len; else rtlpriv->stats.txbytesunicast += skb->len; spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); ring = &rtlpci->tx_ring[hw_queue]; if (hw_queue != BEACON_QUEUE) { if (rtlpriv->use_new_trx_flow) idx = ring->cur_tx_wp; else idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; } else { idx = 0; } pdesc = &ring->desc[idx]; if (rtlpriv->use_new_trx_flow) { ptx_bd_desc = &ring->buffer_desc[idx]; } else { own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN); if ((own == 1) && (hw_queue != BEACON_QUEUE)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n", hw_queue, ring->idx, idx, skb_queue_len(&ring->queue)); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); return skb->len; } } if (rtlpriv->cfg->ops->get_available_desc && rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "get_available_desc fail\n"); spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); return skb->len; } if (ieee80211_is_data_qos(fc)) { tid = rtl_get_tid(skb); if (sta) { sta_entry = (struct rtl_sta_info *)sta->drv_priv; seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; seq_number += 1; if 
(!ieee80211_has_morefrags(hdr->frame_control)) sta_entry->tids[tid].seq_number = seq_number; } } if (ieee80211_is_data(fc)) rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, (u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc); __skb_queue_tail(&ring->queue, skb); if (rtlpriv->use_new_trx_flow) { rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, &hw_queue); } else { rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, &temp_one); } if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && hw_queue != BEACON_QUEUE) { RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n", hw_queue, ring->idx, idx, skb_queue_len(&ring->queue)); ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); rtlpriv->cfg->ops->tx_polling(hw, hw_queue); return 0; } static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u16 i = 0; int queue_id; struct rtl8192_tx_ring *ring; if (mac->skip_scan) return; for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) { u32 queue_len; if (((queues >> queue_id) & 0x1) == 0) { queue_id--; continue; } ring = &pcipriv->dev.tx_ring[queue_id]; queue_len = skb_queue_len(&ring->queue); if (queue_len == 0 || queue_id == BEACON_QUEUE || queue_id == TXCMD_QUEUE) { queue_id--; continue; } else { msleep(20); i++; } /* we just wait 1s for all queues */ if (rtlpriv->psc.rfpwr_state == ERFOFF || is_hal_stop(rtlhal) || i >= 200) return; } } static void rtl_pci_deinit(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); _rtl_pci_deinit_trx_ring(hw); synchronize_irq(rtlpci->pdev->irq); 
tasklet_kill(&rtlpriv->works.irq_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_change_work);
	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);
}

/*
 * rtl_pci_init - initialize driver-private PCI state and the TX/RX rings.
 * @hw:   mac80211 hardware context
 * @pdev: the underlying PCI device
 *
 * Returns 0 on success, or the negative error from TX/RX ring setup.
 */
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int err;

	_rtl_pci_init_struct(hw, pdev);

	err = _rtl_pci_init_trx_ring(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "tx ring initialization failed\n");
		return err;
	}

	return 0;
}

/*
 * rtl_pci_start - bring the adapter up: reset the TRX rings, initialize
 * the hardware via the HAL ops, enable interrupts and mark the HAL started.
 *
 * Returns 0 on success or the error from the HAL hw_init op.
 */
static int rtl_pci_start(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	int err;

	rtl_pci_reset_trx_ring(hw);

	rtlpci->driver_is_goingto_unload = false;

	/* Initialize BT-coexistence state only when the HAL provides a
	 * get_btc_status op and it reports coexistence is in use. */
	if (rtlpriv->cfg->ops->get_btc_status &&
	    rtlpriv->cfg->ops->get_btc_status()) {
		rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
		rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
	}

	err = rtlpriv->cfg->ops->hw_init(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "Failed to config hardware!\n");
		return err;
	}

	rtlpriv->cfg->ops->enable_interrupt(hw);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");

	rtl_init_rx_config(hw);

	/*should be after adapter start and interrupt enable. */
	set_hal_start(rtlhal);

	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

	rtlpci->up_first_time = false;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "rtl_pci_start OK\n");
	return 0;
}

/*
 * rtl_pci_stop - take the adapter down: mark the HAL stopped, disable
 * interrupts, wait (bounded) for any in-flight RF state change, then
 * disable the hardware.  Continues below across the chunk seam.
 */
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u8 RFInProgressTimeOut = 0;

	if (rtlpriv->cfg->ops->get_btc_status())
		rtlpriv->btcoexist.btc_ops->btc_halt_notify();

	/*
	 *should be before disable interrupt&adapter
	 *and will do it immediately.
*/ set_hal_stop(rtlhal); rtlpci->driver_is_goingto_unload = true; rtlpriv->cfg->ops->disable_interrupt(hw); cancel_work_sync(&rtlpriv->works.lps_change_work); spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); while (ppsc->rfchange_inprogress) { spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); if (RFInProgressTimeOut > 100) { spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); break; } mdelay(1); RFInProgressTimeOut++; spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); } ppsc->rfchange_inprogress = true; spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); rtlpriv->cfg->ops->hw_disable(hw); /* some things are not needed if firmware not available */ if (!rtlpriv->max_fw_size) return; rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags); ppsc->rfchange_inprogress = false; spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); rtl_pci_enable_aspm(hw); } static bool _rtl_pci_find_adapter(struct pci_dev *pdev, struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct pci_dev *bridge_pdev = pdev->bus->self; u16 venderid; u16 deviceid; u8 revisionid; u16 irqline; u8 tmp; pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN; venderid = pdev->vendor; deviceid = pdev->device; pci_read_config_byte(pdev, 0x8, &revisionid); pci_read_config_word(pdev, 0x3C, &irqline); /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses * r8192e_pci, and RTL8192SE, which uses this driver. If the * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then * the correct driver is r8192e_pci, thus this routine should * return false. 
*/ if (deviceid == RTL_PCI_8192SE_DID && revisionid == RTL_PCI_REVISION_ID_8192PCIE) return false; if (deviceid == RTL_PCI_8192_DID || deviceid == RTL_PCI_0044_DID || deviceid == RTL_PCI_0047_DID || deviceid == RTL_PCI_8192SE_DID || deviceid == RTL_PCI_8174_DID || deviceid == RTL_PCI_8173_DID || deviceid == RTL_PCI_8172_DID || deviceid == RTL_PCI_8171_DID) { switch (revisionid) { case RTL_PCI_REVISION_ID_8192PCIE: RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "8192 PCI-E is found - vid/did=%x/%x\n", venderid, deviceid); rtlhal->hw_type = HARDWARE_TYPE_RTL8192E; return false; case RTL_PCI_REVISION_ID_8192SE: RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "8192SE is found - vid/did=%x/%x\n", venderid, deviceid); rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Err: Unknown device - vid/did=%x/%x\n", venderid, deviceid); rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE; break; } } else if (deviceid == RTL_PCI_8723AE_DID) { rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "8723AE PCI-E is found - " "vid/did=%x/%x\n", venderid, deviceid); } else if (deviceid == RTL_PCI_8192CET_DID || deviceid == RTL_PCI_8192CE_DID || deviceid == RTL_PCI_8191CE_DID || deviceid == RTL_PCI_8188CE_DID) { rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "8192C PCI-E is found - vid/did=%x/%x\n", venderid, deviceid); } else if (deviceid == RTL_PCI_8192DE_DID || deviceid == RTL_PCI_8192DE_DID2) { rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "8192D PCI-E is found - vid/did=%x/%x\n", venderid, deviceid); } else if (deviceid == RTL_PCI_8188EE_DID) { rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Find adapter, Hardware type is 8188EE\n"); } else if (deviceid == RTL_PCI_8723BE_DID) { rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE; RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, "Find adapter, Hardware type is 8723BE\n"); } else if (deviceid 
== RTL_PCI_8192EE_DID) { rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE; RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, "Find adapter, Hardware type is 8192EE\n"); } else if (deviceid == RTL_PCI_8821AE_DID) { rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE; RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, "Find adapter, Hardware type is 8821AE\n"); } else if (deviceid == RTL_PCI_8812AE_DID) { rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE; RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD, "Find adapter, Hardware type is 8812AE\n"); } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Err: Unknown device - vid/did=%x/%x\n", venderid, deviceid); rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE; } if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) { if (revisionid == 0 || revisionid == 1) { if (revisionid == 0) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Find 92DE MAC0\n"); rtlhal->interfaceindex = 0; } else if (revisionid == 1) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Find 92DE MAC1\n"); rtlhal->interfaceindex = 1; } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n", venderid, deviceid, revisionid); rtlhal->interfaceindex = 0; } } /* 92ee use new trx flow */ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) rtlpriv->use_new_trx_flow = true; else rtlpriv->use_new_trx_flow = false; /*find bus info */ pcipriv->ndis_adapter.busnumber = pdev->bus->number; pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); /*find bridge info */ pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN; /* some ARM have no bridge_pdev and will crash here * so we should check if bridge_pdev is NULL */ if (bridge_pdev) { /*find bridge info if available */ pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) { if (bridge_pdev->vendor == pcibridge_vendors[tmp]) { pcipriv->ndis_adapter.pcibridge_vendor = tmp; RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Pci Bridge 
Vendor is found index: %d\n", tmp); break; } } } if (pcipriv->ndis_adapter.pcibridge_vendor != PCI_BRIDGE_VENDOR_UNKNOWN) { pcipriv->ndis_adapter.pcibridge_busnum = bridge_pdev->bus->number; pcipriv->ndis_adapter.pcibridge_devnum = PCI_SLOT(bridge_pdev->devfn); pcipriv->ndis_adapter.pcibridge_funcnum = PCI_FUNC(bridge_pdev->devfn); pcipriv->ndis_adapter.pcibridge_pciehdr_offset = pci_pcie_cap(bridge_pdev); pcipriv->ndis_adapter.num4bytes = (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4; rtl_pci_get_linkcontrol_field(hw); if (pcipriv->ndis_adapter.pcibridge_vendor == PCI_BRIDGE_VENDOR_AMD) { pcipriv->ndis_adapter.amd_l1_patch = rtl_pci_get_amd_l1_patch(hw); } } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n", pcipriv->ndis_adapter.busnumber, pcipriv->ndis_adapter.devnumber, pcipriv->ndis_adapter.funcnumber, pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg); RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n", pcipriv->ndis_adapter.pcibridge_busnum, pcipriv->ndis_adapter.pcibridge_devnum, pcipriv->ndis_adapter.pcibridge_funcnum, pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor], pcipriv->ndis_adapter.pcibridge_pciehdr_offset, pcipriv->ndis_adapter.pcibridge_linkctrlreg, pcipriv->ndis_adapter.amd_l1_patch); rtl_pci_parse_configuration(pdev, hw); list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list); return true; } static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); int ret; ret = pci_enable_msi(rtlpci->pdev); if (ret < 0) return ret; ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, IRQF_SHARED, KBUILD_MODNAME, hw); if (ret < 0) { pci_disable_msi(rtlpci->pdev); return ret; } rtlpci->using_msi = true; RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, 
DBG_DMESG, "MSI Interrupt Mode!\n"); return 0; } static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); int ret; ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, IRQF_SHARED, KBUILD_MODNAME, hw); if (ret < 0) return ret; rtlpci->using_msi = false; RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG, "Pin-based Interrupt Mode!\n"); return 0; } static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw) { struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); int ret; if (rtlpci->msi_support) { ret = rtl_pci_intr_mode_msi(hw); if (ret < 0) ret = rtl_pci_intr_mode_legacy(hw); } else { ret = rtl_pci_intr_mode_legacy(hw); } return ret; } int rtl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ieee80211_hw *hw = NULL; struct rtl_priv *rtlpriv = NULL; struct rtl_pci_priv *pcipriv = NULL; struct rtl_pci *rtlpci; unsigned long pmem_start, pmem_len, pmem_flags; int err; err = pci_enable_device(pdev); if (err) { RT_ASSERT(false, "%s : Cannot enable new PCI device\n", pci_name(pdev)); return err; } if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { RT_ASSERT(false, "Unable to obtain 32bit DMA for consistent allocations\n"); err = -ENOMEM; goto fail1; } } pci_set_master(pdev); hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) + sizeof(struct rtl_priv), &rtl_ops); if (!hw) { RT_ASSERT(false, "%s : ieee80211 alloc failed\n", pci_name(pdev)); err = -ENOMEM; goto fail1; } SET_IEEE80211_DEV(hw, &pdev->dev); pci_set_drvdata(pdev, hw); rtlpriv = hw->priv; rtlpriv->hw = hw; pcipriv = (void *)rtlpriv->priv; pcipriv->dev.pdev = pdev; init_completion(&rtlpriv->firmware_loading_complete); /*proximity init here*/ rtlpriv->proximity.proxim_on = false; pcipriv = (void *)rtlpriv->priv; pcipriv->dev.pdev = pdev; /* init cfg & 
intf_ops */ rtlpriv->rtlhal.interface = INTF_PCI; rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); rtlpriv->intf_ops = &rtl_pci_ops; rtlpriv->glb_var = &rtl_global_var; /* *init dbgp flags before all *other functions, because we will *use it in other funtions like *RT_TRACE/RT_PRINT/RTL_PRINT_DATA *you can not use these macro *before this */ rtl_dbgp_flag_init(hw); /* MEM map */ err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) { RT_ASSERT(false, "Can't obtain PCI resources\n"); goto fail1; } pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id); pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id); pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id); /*shared mem start */ rtlpriv->io.pci_mem_start = (unsigned long)pci_iomap(pdev, rtlpriv->cfg->bar_id, pmem_len); if (rtlpriv->io.pci_mem_start == 0) { RT_ASSERT(false, "Can't map PCI mem\n"); err = -ENOMEM; goto fail2; } RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n", pmem_start, pmem_len, pmem_flags, rtlpriv->io.pci_mem_start); /* Disable Clk Request */ pci_write_config_byte(pdev, 0x81, 0); /* leave D3 mode */ pci_write_config_byte(pdev, 0x44, 0); pci_write_config_byte(pdev, 0x04, 0x06); pci_write_config_byte(pdev, 0x04, 0x07); /* find adapter */ if (!_rtl_pci_find_adapter(pdev, hw)) { err = -ENODEV; goto fail3; } /* Init IO handler */ _rtl_pci_io_handler_init(&pdev->dev, hw); /*like read eeprom and so on */ rtlpriv->cfg->ops->read_eeprom_info(hw); if (rtlpriv->cfg->ops->init_sw_vars(hw)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n"); err = -ENODEV; goto fail3; } rtlpriv->cfg->ops->init_sw_leds(hw); /*aspm */ rtl_pci_init_aspm(hw); /* Init mac80211 sw */ err = rtl_init_core(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't allocate sw for mac80211\n"); goto fail3; } /* Init PCI sw */ err = rtl_pci_init(hw, pdev); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init 
PCI\n"); goto fail3; } err = ieee80211_register_hw(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't register mac80211 hw.\n"); err = -ENODEV; goto fail3; } rtlpriv->mac80211.mac80211_registered = 1; err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "failed to create sysfs device attributes\n"); goto fail3; } /*init rfkill */ rtl_init_rfkill(hw); /* Init PCI sw */ rtlpci = rtl_pcidev(pcipriv); err = rtl_pci_intr_mode_decide(hw); if (err) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s: failed to register IRQ handler\n", wiphy_name(hw->wiphy)); goto fail3; } rtlpci->irq_alloc = 1; set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); return 0; fail3: pci_set_drvdata(pdev, NULL); rtl_deinit_core(hw); if (rtlpriv->io.pci_mem_start != 0) pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); fail2: pci_release_regions(pdev); complete(&rtlpriv->firmware_loading_complete); fail1: if (hw) ieee80211_free_hw(hw); pci_disable_device(pdev); return err; } EXPORT_SYMBOL(rtl_pci_probe); void rtl_pci_disconnect(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); struct rtl_mac *rtlmac = rtl_mac(rtlpriv); /* just in case driver is removed before firmware callback */ wait_for_completion(&rtlpriv->firmware_loading_complete); clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group); /*ieee80211_unregister_hw will call ops_stop */ if (rtlmac->mac80211_registered == 1) { ieee80211_unregister_hw(hw); rtlmac->mac80211_registered = 0; } else { rtl_deinit_deferred_work(hw); rtlpriv->intf_ops->adapter_stop(hw); } rtlpriv->cfg->ops->disable_interrupt(hw); /*deinit rfkill */ rtl_deinit_rfkill(hw); rtl_pci_deinit(hw); rtl_deinit_core(hw); rtlpriv->cfg->ops->deinit_sw_vars(hw); if (rtlpci->irq_alloc) { 
synchronize_irq(rtlpci->pdev->irq);
		free_irq(rtlpci->pdev->irq, hw);
		rtlpci->irq_alloc = 0;
	}

	if (rtlpci->using_msi)
		pci_disable_msi(rtlpci->pdev);

	list_del(&rtlpriv->list);
	if (rtlpriv->io.pci_mem_start != 0) {
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
		pci_release_regions(pdev);
	}

	pci_disable_device(pdev);

	rtl_pci_disable_aspm(hw);

	pci_set_drvdata(pdev, NULL);

	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtl_pci_disconnect);

#ifdef CONFIG_PM_SLEEP
/***************************************
kernel pci power state define:
PCI_D0 ((pci_power_t __force) 0)
PCI_D1 ((pci_power_t __force) 1)
PCI_D2 ((pci_power_t __force) 2)
PCI_D3hot ((pci_power_t __force) 3)
PCI_D3cold ((pci_power_t __force) 4)
PCI_UNKNOWN ((pci_power_t __force) 5)

This function is called when system
goes into suspend state mac80211 will
call rtl_mac_stop() from the mac80211
suspend function first, So there is
no need to call hw_disable here.
****************************************/
/*
 * rtl_pci_suspend - PM sleep hook: ask the HAL to suspend the hardware
 * and tear down the rfkill interface.  Always returns 0.
 */
int rtl_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->cfg->ops->hw_suspend(hw);
	rtl_deinit_rfkill(hw);

	return 0;
}
EXPORT_SYMBOL(rtl_pci_suspend);

/*
 * rtl_pci_resume - PM resume hook: mirror of rtl_pci_suspend; the HAL
 * restores the hardware and rfkill is re-registered.  Always returns 0.
 */
int rtl_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->cfg->ops->hw_resume(hw);
	rtl_init_rfkill(hw);

	return 0;
}
EXPORT_SYMBOL(rtl_pci_resume);
#endif /* CONFIG_PM_SLEEP */

/* PCI bus interface ops table handed to the chip-independent core. */
struct rtl_intf_ops rtl_pci_ops = {
	.read_efuse_byte = read_efuse_byte,
	.adapter_start = rtl_pci_start,
	.adapter_stop = rtl_pci_stop,
	.check_buddy_priv = rtl_pci_check_buddy_priv,
	.adapter_tx = rtl_pci_tx,
	.flush = rtl_pci_flush,
	.reset_trx_ring = rtl_pci_reset_trx_ring,
	.waitq_insert = rtl_pci_tx_chk_waitq_insert,

	.disable_aspm = rtl_pci_disable_aspm,
	.enable_aspm = rtl_pci_enable_aspm,
};
gpl-2.0
civato/P900-Lollipop
drivers/video/omap2/dss/apply.c
1988
30491
/* * Copyright (C) 2011 Texas Instruments * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #define DSS_SUBSYS_NAME "APPLY" #include <linux/kernel.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/jiffies.h> #include <video/omapdss.h> #include "dss.h" #include "dss_features.h" /* * We have 4 levels of cache for the dispc settings. First two are in SW and * the latter two in HW. * * set_info() * v * +--------------------+ * | user_info | * +--------------------+ * v * apply() * v * +--------------------+ * | info | * +--------------------+ * v * write_regs() * v * +--------------------+ * | shadow registers | * +--------------------+ * v * VFP or lcd/digit_enable * v * +--------------------+ * | registers | * +--------------------+ */ struct ovl_priv_data { bool user_info_dirty; struct omap_overlay_info user_info; bool info_dirty; struct omap_overlay_info info; bool shadow_info_dirty; bool extra_info_dirty; bool shadow_extra_info_dirty; bool enabled; enum omap_channel channel; u32 fifo_low, fifo_high; /* * True if overlay is to be enabled. Used to check and calculate configs * for the overlay before it is enabled in the HW. */ bool enabling; }; struct mgr_priv_data { bool user_info_dirty; struct omap_overlay_manager_info user_info; bool info_dirty; struct omap_overlay_manager_info info; bool shadow_info_dirty; /* If true, GO bit is up and shadow registers cannot be written. 
* Never true for manual update displays */
	bool busy;

	/* If true, dispc output is enabled */
	bool updating;

	/* If true, a display is enabled using this manager */
	bool enabled;
};

/* One private-state slot per HW overlay/manager, plus global fifo-merge
 * and ISR bookkeeping.  All fields are protected by data_lock below. */
static struct {
	struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
	struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];

	bool fifo_merge_dirty;
	bool fifo_merge;

	bool irq_enabled;
} dss_data;

/* protects dss_data */
static spinlock_t data_lock;
/* lock for blocking functions */
static DEFINE_MUTEX(apply_lock);
static DECLARE_COMPLETION(extra_updated_completion);

static void dss_register_vsync_isr(void);

/* Look up the private state for an overlay, indexed by its id. */
static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
{
	return &dss_data.ovl_priv_data_array[ovl->id];
}

/* Look up the private state for a manager, indexed by its id. */
static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
{
	return &dss_data.mgr_priv_data_array[mgr->id];
}

/*
 * dss_apply_init - one-time init: set up data_lock and seed each overlay's
 * info with opaque global alpha (255) and a default z-order.  When the HW
 * supports free z-order (FEAT_ALPHA_FREE_ZORDER) overlays 1..3 get
 * descending z-orders 3, 2, 1; otherwise all start at 0.
 */
void dss_apply_init(void)
{
	const int num_ovls = dss_feat_get_num_ovls();
	int i;

	spin_lock_init(&data_lock);

	for (i = 0; i < num_ovls; ++i) {
		struct ovl_priv_data *op;

		op = &dss_data.ovl_priv_data_array[i];

		op->info.global_alpha = 255;

		switch (i) {
		case 0:
			op->info.zorder = 0;
			break;
		case 1:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
			break;
		case 2:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
			break;
		case 3:
			op->info.zorder =
				dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ?
1 : 0;
			break;
		}

		/* user_info starts out identical to the applied info */
		op->user_info = op->info;
	}
}

/* True when the overlay's display is a manual-update (e.g. DSI cmd) panel. */
static bool ovl_manual_update(struct omap_overlay *ovl)
{
	return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

/* True when the manager's display is a manual-update panel. */
static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
	return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

/*
 * dss_check_settings_low - validate a manager config via dss_mgr_check().
 * When @applying is true, dirty user_info is tested instead of the already
 * applied info, for both the manager and each of its overlays; disabled
 * (and not-being-enabled) overlays contribute a NULL slot.
 *
 * Returns 0 when the configuration is legal, otherwise dss_mgr_check()'s
 * error.
 */
static int dss_check_settings_low(struct omap_overlay_manager *mgr,
		struct omap_dss_device *dssdev, bool applying)
{
	struct omap_overlay_info *oi;
	struct omap_overlay_manager_info *mi;
	struct omap_overlay *ovl;
	struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
	struct ovl_priv_data *op;
	struct mgr_priv_data *mp;

	mp = get_mgr_priv(mgr);

	if (applying && mp->user_info_dirty)
		mi = &mp->user_info;
	else
		mi = &mp->info;

	/* collect the infos to be tested into the array */
	list_for_each_entry(ovl, &mgr->overlays, list) {
		op = get_ovl_priv(ovl);

		if (!op->enabled && !op->enabling)
			oi = NULL;
		else if (applying && op->user_info_dirty)
			oi = &op->user_info;
		else
			oi = &op->info;

		ois[ovl->id] = oi;
	}

	return dss_mgr_check(mgr, dssdev, mi, ois);
}

/*
 * check manager and overlay settings using overlay_info from data->info
 */
static int dss_check_settings(struct omap_overlay_manager *mgr,
		struct omap_dss_device *dssdev)
{
	return dss_check_settings_low(mgr, dssdev, false);
}

/*
 * check manager and overlay settings using overlay_info from ovl->info if
 * dirty and from data->info otherwise
 */
static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
		struct omap_dss_device *dssdev)
{
	return dss_check_settings_low(mgr, dssdev, true);
}

/*
 * need_isr - decide whether the vsync/framedone ISR must stay registered:
 * true while any enabled manager is mid-update, has a raised GO bit, or
 * still has dirty info/extra_info to push to the shadow registers.
 */
static bool need_isr(void)
{
	const int num_mgrs = dss_feat_get_num_mgrs();
	int i;

	for (i = 0; i < num_mgrs; ++i) {
		struct omap_overlay_manager *mgr;
		struct mgr_priv_data *mp;
		struct omap_overlay *ovl;

		mgr = omap_dss_get_overlay_manager(i);
		mp = get_mgr_priv(mgr);

		if (!mp->enabled)
			continue;

		if (mgr_manual_update(mgr)) {
			/* to catch FRAMEDONE */
			if (mp->updating)
				return true;
		} else {
			/* to catch GO bit going down */
			if (mp->busy)
				return true;

			/*
to write new values to registers */ if (mp->info_dirty) return true; /* to set GO bit */ if (mp->shadow_info_dirty) return true; list_for_each_entry(ovl, &mgr->overlays, list) { struct ovl_priv_data *op; op = get_ovl_priv(ovl); /* * NOTE: we check extra_info flags even for * disabled overlays, as extra_infos need to be * always written. */ /* to write new values to registers */ if (op->extra_info_dirty) return true; /* to set GO bit */ if (op->shadow_extra_info_dirty) return true; if (!op->enabled) continue; /* to write new values to registers */ if (op->info_dirty) return true; /* to set GO bit */ if (op->shadow_info_dirty) return true; } } } return false; } static bool need_go(struct omap_overlay_manager *mgr) { struct omap_overlay *ovl; struct mgr_priv_data *mp; struct ovl_priv_data *op; mp = get_mgr_priv(mgr); if (mp->shadow_info_dirty) return true; list_for_each_entry(ovl, &mgr->overlays, list) { op = get_ovl_priv(ovl); if (op->shadow_info_dirty || op->shadow_extra_info_dirty) return true; } return false; } /* returns true if an extra_info field is currently being updated */ static bool extra_info_update_ongoing(void) { const int num_ovls = omap_dss_get_num_overlays(); struct ovl_priv_data *op; struct omap_overlay *ovl; struct mgr_priv_data *mp; int i; for (i = 0; i < num_ovls; ++i) { ovl = omap_dss_get_overlay(i); op = get_ovl_priv(ovl); if (!ovl->manager) continue; mp = get_mgr_priv(ovl->manager); if (!mp->enabled) continue; if (!mp->updating) continue; if (op->extra_info_dirty || op->shadow_extra_info_dirty) return true; } return false; } /* wait until no extra_info updates are pending */ static void wait_pending_extra_info_updates(void) { bool updating; unsigned long flags; unsigned long t; int r; spin_lock_irqsave(&data_lock, flags); updating = extra_info_update_ongoing(); if (!updating) { spin_unlock_irqrestore(&data_lock, flags); return; } init_completion(&extra_updated_completion); spin_unlock_irqrestore(&data_lock, flags); t = msecs_to_jiffies(500); r 
= wait_for_completion_timeout(&extra_updated_completion, t); if (r == 0) DSSWARN("timeout in wait_pending_extra_info_updates\n"); else if (r < 0) DSSERR("wait_pending_extra_info_updates failed: %d\n", r); } int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) { unsigned long timeout = msecs_to_jiffies(500); struct mgr_priv_data *mp; u32 irq; int r; int i; struct omap_dss_device *dssdev = mgr->device; if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return 0; if (mgr_manual_update(mgr)) return 0; r = dispc_runtime_get(); if (r) return r; irq = dispc_mgr_get_vsync_irq(mgr->id); mp = get_mgr_priv(mgr); i = 0; while (1) { unsigned long flags; bool shadow_dirty, dirty; spin_lock_irqsave(&data_lock, flags); dirty = mp->info_dirty; shadow_dirty = mp->shadow_info_dirty; spin_unlock_irqrestore(&data_lock, flags); if (!dirty && !shadow_dirty) { r = 0; break; } /* 4 iterations is the worst case: * 1 - initial iteration, dirty = true (between VFP and VSYNC) * 2 - first VSYNC, dirty = true * 3 - dirty = false, shadow_dirty = true * 4 - shadow_dirty = false */ if (i++ == 3) { DSSERR("mgr(%d)->wait_for_go() not finishing\n", mgr->id); r = 0; break; } r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); if (r == -ERESTARTSYS) break; if (r) { DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id); break; } } dispc_runtime_put(); return r; } int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl) { unsigned long timeout = msecs_to_jiffies(500); struct ovl_priv_data *op; struct omap_dss_device *dssdev; u32 irq; int r; int i; if (!ovl->manager) return 0; dssdev = ovl->manager->device; if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return 0; if (ovl_manual_update(ovl)) return 0; r = dispc_runtime_get(); if (r) return r; irq = dispc_mgr_get_vsync_irq(ovl->manager->id); op = get_ovl_priv(ovl); i = 0; while (1) { unsigned long flags; bool shadow_dirty, dirty; spin_lock_irqsave(&data_lock, flags); dirty = op->info_dirty; shadow_dirty = op->shadow_info_dirty; 
spin_unlock_irqrestore(&data_lock, flags); if (!dirty && !shadow_dirty) { r = 0; break; } /* 4 iterations is the worst case: * 1 - initial iteration, dirty = true (between VFP and VSYNC) * 2 - first VSYNC, dirty = true * 3 - dirty = false, shadow_dirty = true * 4 - shadow_dirty = false */ if (i++ == 3) { DSSERR("ovl(%d)->wait_for_go() not finishing\n", ovl->id); r = 0; break; } r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); if (r == -ERESTARTSYS) break; if (r) { DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id); break; } } dispc_runtime_put(); return r; } static void dss_ovl_write_regs(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); struct omap_overlay_info *oi; bool ilace, replication; struct mgr_priv_data *mp; int r; DSSDBGF("%d", ovl->id); if (!op->enabled || !op->info_dirty) return; oi = &op->info; replication = dss_use_replication(ovl->manager->device, oi->color_mode); ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC; r = dispc_ovl_setup(ovl->id, oi, ilace, replication); if (r) { /* * We can't do much here, as this function can be called from * vsync interrupt. 
*/ DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id); /* This will leave fifo configurations in a nonoptimal state */ op->enabled = false; dispc_ovl_enable(ovl->id, false); return; } mp = get_mgr_priv(ovl->manager); op->info_dirty = false; if (mp->updating) op->shadow_info_dirty = true; } static void dss_ovl_write_regs_extra(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); struct mgr_priv_data *mp; DSSDBGF("%d", ovl->id); if (!op->extra_info_dirty) return; /* note: write also when op->enabled == false, so that the ovl gets * disabled */ dispc_ovl_enable(ovl->id, op->enabled); dispc_ovl_set_channel_out(ovl->id, op->channel); dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high); mp = get_mgr_priv(ovl->manager); op->extra_info_dirty = false; if (mp->updating) op->shadow_extra_info_dirty = true; } static void dss_mgr_write_regs(struct omap_overlay_manager *mgr) { struct mgr_priv_data *mp = get_mgr_priv(mgr); struct omap_overlay *ovl; DSSDBGF("%d", mgr->id); if (!mp->enabled) return; WARN_ON(mp->busy); /* Commit overlay settings */ list_for_each_entry(ovl, &mgr->overlays, list) { dss_ovl_write_regs(ovl); dss_ovl_write_regs_extra(ovl); } if (mp->info_dirty) { dispc_mgr_setup(mgr->id, &mp->info); mp->info_dirty = false; if (mp->updating) mp->shadow_info_dirty = true; } } static void dss_write_regs_common(void) { const int num_mgrs = omap_dss_get_num_overlay_managers(); int i; if (!dss_data.fifo_merge_dirty) return; for (i = 0; i < num_mgrs; ++i) { struct omap_overlay_manager *mgr; struct mgr_priv_data *mp; mgr = omap_dss_get_overlay_manager(i); mp = get_mgr_priv(mgr); if (mp->enabled) { if (dss_data.fifo_merge_dirty) { dispc_enable_fifomerge(dss_data.fifo_merge); dss_data.fifo_merge_dirty = false; } if (mp->updating) mp->shadow_info_dirty = true; } } } static void dss_write_regs(void) { const int num_mgrs = omap_dss_get_num_overlay_managers(); int i; dss_write_regs_common(); for (i = 0; i < num_mgrs; ++i) { struct 
omap_overlay_manager *mgr; struct mgr_priv_data *mp; int r; mgr = omap_dss_get_overlay_manager(i); mp = get_mgr_priv(mgr); if (!mp->enabled || mgr_manual_update(mgr) || mp->busy) continue; r = dss_check_settings(mgr, mgr->device); if (r) { DSSERR("cannot write registers for manager %s: " "illegal configuration\n", mgr->name); continue; } dss_mgr_write_regs(mgr); } } static void dss_set_go_bits(void) { const int num_mgrs = omap_dss_get_num_overlay_managers(); int i; for (i = 0; i < num_mgrs; ++i) { struct omap_overlay_manager *mgr; struct mgr_priv_data *mp; mgr = omap_dss_get_overlay_manager(i); mp = get_mgr_priv(mgr); if (!mp->enabled || mgr_manual_update(mgr) || mp->busy) continue; if (!need_go(mgr)) continue; mp->busy = true; if (!dss_data.irq_enabled && need_isr()) dss_register_vsync_isr(); dispc_mgr_go(mgr->id); } } static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr) { struct omap_overlay *ovl; struct mgr_priv_data *mp; struct ovl_priv_data *op; mp = get_mgr_priv(mgr); mp->shadow_info_dirty = false; list_for_each_entry(ovl, &mgr->overlays, list) { op = get_ovl_priv(ovl); op->shadow_info_dirty = false; op->shadow_extra_info_dirty = false; } } void dss_mgr_start_update(struct omap_overlay_manager *mgr) { struct mgr_priv_data *mp = get_mgr_priv(mgr); unsigned long flags; int r; spin_lock_irqsave(&data_lock, flags); WARN_ON(mp->updating); r = dss_check_settings(mgr, mgr->device); if (r) { DSSERR("cannot start manual update: illegal configuration\n"); spin_unlock_irqrestore(&data_lock, flags); return; } dss_mgr_write_regs(mgr); dss_write_regs_common(); mp->updating = true; if (!dss_data.irq_enabled && need_isr()) dss_register_vsync_isr(); dispc_mgr_enable(mgr->id, true); mgr_clear_shadow_dirty(mgr); spin_unlock_irqrestore(&data_lock, flags); } static void dss_apply_irq_handler(void *data, u32 mask); static void dss_register_vsync_isr(void) { const int num_mgrs = dss_feat_get_num_mgrs(); u32 mask; int r, i; mask = 0; for (i = 0; i < num_mgrs; ++i) 
mask |= dispc_mgr_get_vsync_irq(i); for (i = 0; i < num_mgrs; ++i) mask |= dispc_mgr_get_framedone_irq(i); r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask); WARN_ON(r); dss_data.irq_enabled = true; } static void dss_unregister_vsync_isr(void) { const int num_mgrs = dss_feat_get_num_mgrs(); u32 mask; int r, i; mask = 0; for (i = 0; i < num_mgrs; ++i) mask |= dispc_mgr_get_vsync_irq(i); for (i = 0; i < num_mgrs; ++i) mask |= dispc_mgr_get_framedone_irq(i); r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask); WARN_ON(r); dss_data.irq_enabled = false; } static void dss_apply_irq_handler(void *data, u32 mask) { const int num_mgrs = dss_feat_get_num_mgrs(); int i; bool extra_updating; spin_lock(&data_lock); /* clear busy, updating flags, shadow_dirty flags */ for (i = 0; i < num_mgrs; i++) { struct omap_overlay_manager *mgr; struct mgr_priv_data *mp; bool was_updating; mgr = omap_dss_get_overlay_manager(i); mp = get_mgr_priv(mgr); if (!mp->enabled) continue; was_updating = mp->updating; mp->updating = dispc_mgr_is_enabled(i); if (!mgr_manual_update(mgr)) { bool was_busy = mp->busy; mp->busy = dispc_mgr_go_busy(i); if (was_busy && !mp->busy) mgr_clear_shadow_dirty(mgr); } } dss_write_regs(); dss_set_go_bits(); extra_updating = extra_info_update_ongoing(); if (!extra_updating) complete_all(&extra_updated_completion); if (!need_isr()) dss_unregister_vsync_isr(); spin_unlock(&data_lock); } static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl) { struct ovl_priv_data *op; op = get_ovl_priv(ovl); if (!op->user_info_dirty) return; op->user_info_dirty = false; op->info_dirty = true; op->info = op->user_info; } static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr) { struct mgr_priv_data *mp; mp = get_mgr_priv(mgr); if (!mp->user_info_dirty) return; mp->user_info_dirty = false; mp->info_dirty = true; mp->info = mp->user_info; } int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) { unsigned long flags; struct omap_overlay *ovl; 
int r; DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name); spin_lock_irqsave(&data_lock, flags); r = dss_check_settings_apply(mgr, mgr->device); if (r) { spin_unlock_irqrestore(&data_lock, flags); DSSERR("failed to apply settings: illegal configuration.\n"); return r; } /* Configure overlays */ list_for_each_entry(ovl, &mgr->overlays, list) omap_dss_mgr_apply_ovl(ovl); /* Configure manager */ omap_dss_mgr_apply_mgr(mgr); dss_write_regs(); dss_set_go_bits(); spin_unlock_irqrestore(&data_lock, flags); return 0; } static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable) { struct ovl_priv_data *op; op = get_ovl_priv(ovl); if (op->enabled == enable) return; op->enabled = enable; op->extra_info_dirty = true; } static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl, u32 fifo_low, u32 fifo_high) { struct ovl_priv_data *op = get_ovl_priv(ovl); if (op->fifo_low == fifo_low && op->fifo_high == fifo_high) return; op->fifo_low = fifo_low; op->fifo_high = fifo_high; op->extra_info_dirty = true; } static void dss_apply_fifo_merge(bool use_fifo_merge) { if (dss_data.fifo_merge == use_fifo_merge) return; dss_data.fifo_merge = use_fifo_merge; dss_data.fifo_merge_dirty = true; } static void dss_ovl_setup_fifo(struct omap_overlay *ovl, bool use_fifo_merge) { struct ovl_priv_data *op = get_ovl_priv(ovl); struct omap_dss_device *dssdev; u32 fifo_low, fifo_high; if (!op->enabled && !op->enabling) return; dssdev = ovl->manager->device; dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high, use_fifo_merge, ovl_manual_update(ovl)); dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high); } static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr, bool use_fifo_merge) { struct omap_overlay *ovl; struct mgr_priv_data *mp; mp = get_mgr_priv(mgr); if (!mp->enabled) return; list_for_each_entry(ovl, &mgr->overlays, list) dss_ovl_setup_fifo(ovl, use_fifo_merge); } static void dss_setup_fifos(bool use_fifo_merge) { const int num_mgrs = 
omap_dss_get_num_overlay_managers(); struct omap_overlay_manager *mgr; int i; for (i = 0; i < num_mgrs; ++i) { mgr = omap_dss_get_overlay_manager(i); dss_mgr_setup_fifos(mgr, use_fifo_merge); } } static int get_num_used_managers(void) { const int num_mgrs = omap_dss_get_num_overlay_managers(); struct omap_overlay_manager *mgr; struct mgr_priv_data *mp; int i; int enabled_mgrs; enabled_mgrs = 0; for (i = 0; i < num_mgrs; ++i) { mgr = omap_dss_get_overlay_manager(i); mp = get_mgr_priv(mgr); if (!mp->enabled) continue; enabled_mgrs++; } return enabled_mgrs; } static int get_num_used_overlays(void) { const int num_ovls = omap_dss_get_num_overlays(); struct omap_overlay *ovl; struct ovl_priv_data *op; struct mgr_priv_data *mp; int i; int enabled_ovls; enabled_ovls = 0; for (i = 0; i < num_ovls; ++i) { ovl = omap_dss_get_overlay(i); op = get_ovl_priv(ovl); if (!op->enabled && !op->enabling) continue; mp = get_mgr_priv(ovl->manager); if (!mp->enabled) continue; enabled_ovls++; } return enabled_ovls; } static bool get_use_fifo_merge(void) { int enabled_mgrs = get_num_used_managers(); int enabled_ovls = get_num_used_overlays(); if (!dss_has_feature(FEAT_FIFO_MERGE)) return false; /* * In theory the only requirement for fifomerge is enabled_ovls <= 1. * However, if we have two managers enabled and set/unset the fifomerge, * we need to set the GO bits in particular sequence for the managers, * and wait in between. * * This is rather difficult as new apply calls can happen at any time, * so we simplify the problem by requiring also that enabled_mgrs <= 1. * In practice this shouldn't matter, because when only one overlay is * enabled, most likely only one output is enabled. 
*/ return enabled_mgrs <= 1 && enabled_ovls <= 1; } int dss_mgr_enable(struct omap_overlay_manager *mgr) { struct mgr_priv_data *mp = get_mgr_priv(mgr); unsigned long flags; int r; bool fifo_merge; mutex_lock(&apply_lock); if (mp->enabled) goto out; spin_lock_irqsave(&data_lock, flags); mp->enabled = true; r = dss_check_settings(mgr, mgr->device); if (r) { DSSERR("failed to enable manager %d: check_settings failed\n", mgr->id); goto err; } /* step 1: setup fifos/fifomerge before enabling the manager */ fifo_merge = get_use_fifo_merge(); dss_setup_fifos(fifo_merge); dss_apply_fifo_merge(fifo_merge); dss_write_regs(); dss_set_go_bits(); spin_unlock_irqrestore(&data_lock, flags); /* wait until fifo config is in */ wait_pending_extra_info_updates(); /* step 2: enable the manager */ spin_lock_irqsave(&data_lock, flags); if (!mgr_manual_update(mgr)) mp->updating = true; spin_unlock_irqrestore(&data_lock, flags); if (!mgr_manual_update(mgr)) dispc_mgr_enable(mgr->id, true); out: mutex_unlock(&apply_lock); return 0; err: mp->enabled = false; spin_unlock_irqrestore(&data_lock, flags); mutex_unlock(&apply_lock); return r; } void dss_mgr_disable(struct omap_overlay_manager *mgr) { struct mgr_priv_data *mp = get_mgr_priv(mgr); unsigned long flags; bool fifo_merge; mutex_lock(&apply_lock); if (!mp->enabled) goto out; if (!mgr_manual_update(mgr)) dispc_mgr_enable(mgr->id, false); spin_lock_irqsave(&data_lock, flags); mp->updating = false; mp->enabled = false; fifo_merge = get_use_fifo_merge(); dss_setup_fifos(fifo_merge); dss_apply_fifo_merge(fifo_merge); dss_write_regs(); dss_set_go_bits(); spin_unlock_irqrestore(&data_lock, flags); wait_pending_extra_info_updates(); out: mutex_unlock(&apply_lock); } int dss_mgr_set_info(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info) { struct mgr_priv_data *mp = get_mgr_priv(mgr); unsigned long flags; int r; r = dss_mgr_simple_check(mgr, info); if (r) return r; spin_lock_irqsave(&data_lock, flags); mp->user_info = 
*info; mp->user_info_dirty = true; spin_unlock_irqrestore(&data_lock, flags); return 0; } void dss_mgr_get_info(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info) { struct mgr_priv_data *mp = get_mgr_priv(mgr); unsigned long flags; spin_lock_irqsave(&data_lock, flags); *info = mp->user_info; spin_unlock_irqrestore(&data_lock, flags); } int dss_mgr_set_device(struct omap_overlay_manager *mgr, struct omap_dss_device *dssdev) { int r; mutex_lock(&apply_lock); if (dssdev->manager) { DSSERR("display '%s' already has a manager '%s'\n", dssdev->name, dssdev->manager->name); r = -EINVAL; goto err; } if ((mgr->supported_displays & dssdev->type) == 0) { DSSERR("display '%s' does not support manager '%s'\n", dssdev->name, mgr->name); r = -EINVAL; goto err; } dssdev->manager = mgr; mgr->device = dssdev; mutex_unlock(&apply_lock); return 0; err: mutex_unlock(&apply_lock); return r; } int dss_mgr_unset_device(struct omap_overlay_manager *mgr) { int r; mutex_lock(&apply_lock); if (!mgr->device) { DSSERR("failed to unset display, display not set.\n"); r = -EINVAL; goto err; } /* * Don't allow currently enabled displays to have the overlay manager * pulled out from underneath them */ if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; goto err; } mgr->device->manager = NULL; mgr->device = NULL; mutex_unlock(&apply_lock); return 0; err: mutex_unlock(&apply_lock); return r; } int dss_ovl_set_info(struct omap_overlay *ovl, struct omap_overlay_info *info) { struct ovl_priv_data *op = get_ovl_priv(ovl); unsigned long flags; int r; r = dss_ovl_simple_check(ovl, info); if (r) return r; spin_lock_irqsave(&data_lock, flags); op->user_info = *info; op->user_info_dirty = true; spin_unlock_irqrestore(&data_lock, flags); return 0; } void dss_ovl_get_info(struct omap_overlay *ovl, struct omap_overlay_info *info) { struct ovl_priv_data *op = get_ovl_priv(ovl); unsigned long flags; spin_lock_irqsave(&data_lock, flags); *info = op->user_info; 
spin_unlock_irqrestore(&data_lock, flags); } int dss_ovl_set_manager(struct omap_overlay *ovl, struct omap_overlay_manager *mgr) { struct ovl_priv_data *op = get_ovl_priv(ovl); unsigned long flags; int r; if (!mgr) return -EINVAL; mutex_lock(&apply_lock); if (ovl->manager) { DSSERR("overlay '%s' already has a manager '%s'\n", ovl->name, ovl->manager->name); r = -EINVAL; goto err; } spin_lock_irqsave(&data_lock, flags); if (op->enabled) { spin_unlock_irqrestore(&data_lock, flags); DSSERR("overlay has to be disabled to change the manager\n"); r = -EINVAL; goto err; } op->channel = mgr->id; op->extra_info_dirty = true; ovl->manager = mgr; list_add_tail(&ovl->list, &mgr->overlays); spin_unlock_irqrestore(&data_lock, flags); /* XXX: When there is an overlay on a DSI manual update display, and * the overlay is first disabled, then moved to tv, and enabled, we * seem to get SYNC_LOST_DIGIT error. * * Waiting doesn't seem to help, but updating the manual update display * after disabling the overlay seems to fix this. This hints that the * overlay is perhaps somehow tied to the LCD output until the output * is updated. * * Userspace workaround for this is to update the LCD after disabling * the overlay, but before moving the overlay to TV. 
*/ mutex_unlock(&apply_lock); return 0; err: mutex_unlock(&apply_lock); return r; } int dss_ovl_unset_manager(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); unsigned long flags; int r; mutex_lock(&apply_lock); if (!ovl->manager) { DSSERR("failed to detach overlay: manager not set\n"); r = -EINVAL; goto err; } spin_lock_irqsave(&data_lock, flags); if (op->enabled) { spin_unlock_irqrestore(&data_lock, flags); DSSERR("overlay has to be disabled to unset the manager\n"); r = -EINVAL; goto err; } op->channel = -1; ovl->manager = NULL; list_del(&ovl->list); spin_unlock_irqrestore(&data_lock, flags); mutex_unlock(&apply_lock); return 0; err: mutex_unlock(&apply_lock); return r; } bool dss_ovl_is_enabled(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); unsigned long flags; bool e; spin_lock_irqsave(&data_lock, flags); e = op->enabled; spin_unlock_irqrestore(&data_lock, flags); return e; } int dss_ovl_enable(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); unsigned long flags; bool fifo_merge; int r; mutex_lock(&apply_lock); if (op->enabled) { r = 0; goto err1; } if (ovl->manager == NULL || ovl->manager->device == NULL) { r = -EINVAL; goto err1; } spin_lock_irqsave(&data_lock, flags); op->enabling = true; r = dss_check_settings(ovl->manager, ovl->manager->device); if (r) { DSSERR("failed to enable overlay %d: check_settings failed\n", ovl->id); goto err2; } /* step 1: configure fifos/fifomerge for currently enabled ovls */ fifo_merge = get_use_fifo_merge(); dss_setup_fifos(fifo_merge); dss_apply_fifo_merge(fifo_merge); dss_write_regs(); dss_set_go_bits(); spin_unlock_irqrestore(&data_lock, flags); /* wait for fifo configs to go in */ wait_pending_extra_info_updates(); /* step 2: enable the overlay */ spin_lock_irqsave(&data_lock, flags); op->enabling = false; dss_apply_ovl_enable(ovl, true); dss_write_regs(); dss_set_go_bits(); spin_unlock_irqrestore(&data_lock, flags); /* wait for overlay to be 
enabled */ wait_pending_extra_info_updates(); mutex_unlock(&apply_lock); return 0; err2: op->enabling = false; spin_unlock_irqrestore(&data_lock, flags); err1: mutex_unlock(&apply_lock); return r; } int dss_ovl_disable(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); unsigned long flags; bool fifo_merge; int r; mutex_lock(&apply_lock); if (!op->enabled) { r = 0; goto err; } if (ovl->manager == NULL || ovl->manager->device == NULL) { r = -EINVAL; goto err; } /* step 1: disable the overlay */ spin_lock_irqsave(&data_lock, flags); dss_apply_ovl_enable(ovl, false); dss_write_regs(); dss_set_go_bits(); spin_unlock_irqrestore(&data_lock, flags); /* wait for the overlay to be disabled */ wait_pending_extra_info_updates(); /* step 2: configure fifos/fifomerge */ spin_lock_irqsave(&data_lock, flags); fifo_merge = get_use_fifo_merge(); dss_setup_fifos(fifo_merge); dss_apply_fifo_merge(fifo_merge); dss_write_regs(); dss_set_go_bits(); spin_unlock_irqrestore(&data_lock, flags); /* wait for fifo config to go in */ wait_pending_extra_info_updates(); mutex_unlock(&apply_lock); return 0; err: mutex_unlock(&apply_lock); return r; }
gpl-2.0
jameskdev/lge-kernel-d1l_kr
net/rose/rose_out.c
4292
2864
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/gfp.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> /* * This procedure is passed a buffer descriptor for an iframe. It builds * the rest of the control part of the frame and then writes it out. */ static void rose_send_iframe(struct sock *sk, struct sk_buff *skb) { struct rose_sock *rose = rose_sk(sk); if (skb == NULL) return; skb->data[2] |= (rose->vr << 5) & 0xE0; skb->data[2] |= (rose->vs << 1) & 0x0E; rose_start_idletimer(sk); rose_transmit_link(skb, rose->neighbour); } void rose_kick(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); struct sk_buff *skb, *skbn; unsigned short start, end; if (rose->state != ROSE_STATE_3) return; if (rose->condition & ROSE_COND_PEER_RX_BUSY) return; if (!skb_peek(&sk->sk_write_queue)) return; start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs; end = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS; if (start == end) return; rose->vs = start; /* * Transmit data until either we're out of data to send or * the window is full. */ skb = skb_dequeue(&sk->sk_write_queue); do { if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { skb_queue_head(&sk->sk_write_queue, skb); break; } skb_set_owner_w(skbn, sk); /* * Transmit the frame copy. 
*/ rose_send_iframe(sk, skbn); rose->vs = (rose->vs + 1) % ROSE_MODULUS; /* * Requeue the original data frame. */ skb_queue_tail(&rose->ack_queue, skb); } while (rose->vs != end && (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); rose->vl = rose->vr; rose->condition &= ~ROSE_COND_ACK_PENDING; rose_stop_timer(sk); } /* * The following routines are taken from page 170 of the 7th ARRL Computer * Networking Conference paper, as is the whole state machine. */ void rose_enquiry_response(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); if (rose->condition & ROSE_COND_OWN_RX_BUSY) rose_write_internal(sk, ROSE_RNR); else rose_write_internal(sk, ROSE_RR); rose->vl = rose->vr; rose->condition &= ~ROSE_COND_ACK_PENDING; rose_stop_timer(sk); }
gpl-2.0
winpih/Riderism-crc-2.6.35
net/rose/rose_out.c
4292
2864
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/gfp.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> /* * This procedure is passed a buffer descriptor for an iframe. It builds * the rest of the control part of the frame and then writes it out. */ static void rose_send_iframe(struct sock *sk, struct sk_buff *skb) { struct rose_sock *rose = rose_sk(sk); if (skb == NULL) return; skb->data[2] |= (rose->vr << 5) & 0xE0; skb->data[2] |= (rose->vs << 1) & 0x0E; rose_start_idletimer(sk); rose_transmit_link(skb, rose->neighbour); } void rose_kick(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); struct sk_buff *skb, *skbn; unsigned short start, end; if (rose->state != ROSE_STATE_3) return; if (rose->condition & ROSE_COND_PEER_RX_BUSY) return; if (!skb_peek(&sk->sk_write_queue)) return; start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs; end = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS; if (start == end) return; rose->vs = start; /* * Transmit data until either we're out of data to send or * the window is full. */ skb = skb_dequeue(&sk->sk_write_queue); do { if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) { skb_queue_head(&sk->sk_write_queue, skb); break; } skb_set_owner_w(skbn, sk); /* * Transmit the frame copy. 
*/ rose_send_iframe(sk, skbn); rose->vs = (rose->vs + 1) % ROSE_MODULUS; /* * Requeue the original data frame. */ skb_queue_tail(&rose->ack_queue, skb); } while (rose->vs != end && (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); rose->vl = rose->vr; rose->condition &= ~ROSE_COND_ACK_PENDING; rose_stop_timer(sk); } /* * The following routines are taken from page 170 of the 7th ARRL Computer * Networking Conference paper, as is the whole state machine. */ void rose_enquiry_response(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); if (rose->condition & ROSE_COND_OWN_RX_BUSY) rose_write_internal(sk, ROSE_RNR); else rose_write_internal(sk, ROSE_RR); rose->vl = rose->vr; rose->condition &= ~ROSE_COND_ACK_PENDING; rose_stop_timer(sk); }
gpl-2.0
yank555-lu/private_msm8660_ics
arch/powerpc/platforms/cell/spu_notify.c
4548
1999
/* * Move OProfile dependencies from spufs module to the kernel so it * can run on non-cell PPC. * * Copyright (C) IBM 2005 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #undef DEBUG #include <linux/module.h> #include <asm/spu.h> #include "spufs/spufs.h" static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier); void spu_switch_notify(struct spu *spu, struct spu_context *ctx) { blocking_notifier_call_chain(&spu_switch_notifier, ctx ? ctx->object_id : 0, spu); } EXPORT_SYMBOL_GPL(spu_switch_notify); int spu_switch_event_register(struct notifier_block *n) { int ret; ret = blocking_notifier_chain_register(&spu_switch_notifier, n); if (!ret) notify_spus_active(); return ret; } EXPORT_SYMBOL_GPL(spu_switch_event_register); int spu_switch_event_unregister(struct notifier_block *n) { return blocking_notifier_chain_unregister(&spu_switch_notifier, n); } EXPORT_SYMBOL_GPL(spu_switch_event_unregister); void spu_set_profile_private_kref(struct spu_context *ctx, struct kref *prof_info_kref, void (* prof_info_release) (struct kref *kref)) { ctx->prof_priv_kref = prof_info_kref; ctx->prof_priv_release = prof_info_release; } EXPORT_SYMBOL_GPL(spu_set_profile_private_kref); void *spu_get_profile_private_kref(struct spu_context *ctx) { return ctx->prof_priv_kref; } EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
gpl-2.0
Fusion-Devices/android_kernel_samsung_klte
drivers/watchdog/wm831x_wdt.c
4804
7930
/* * Watchdog driver for the wm831x PMICs * * Copyright (C) 2009 Wolfson Microelectronics * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/watchdog.h> #include <linux/uaccess.h> #include <linux/gpio.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #include <linux/mfd/wm831x/watchdog.h> static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); struct wm831x_wdt_drvdata { struct watchdog_device wdt; struct wm831x *wm831x; struct mutex lock; int update_gpio; int update_state; }; /* We can't use the sub-second values here but they're included * for completeness. 
*/ static struct { unsigned int time; /* Seconds */ u16 val; /* WDOG_TO value */ } wm831x_wdt_cfgs[] = { { 1, 2 }, { 2, 3 }, { 4, 4 }, { 8, 5 }, { 16, 6 }, { 32, 7 }, { 33, 7 }, /* Actually 32.768s so include both, others round down */ }; static int wm831x_wdt_start(struct watchdog_device *wdt_dev) { struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev); struct wm831x *wm831x = driver_data->wm831x; int ret; mutex_lock(&driver_data->lock); ret = wm831x_reg_unlock(wm831x); if (ret == 0) { ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG, WM831X_WDOG_ENA, WM831X_WDOG_ENA); wm831x_reg_lock(wm831x); } else { dev_err(wm831x->dev, "Failed to unlock security key: %d\n", ret); } mutex_unlock(&driver_data->lock); return ret; } static int wm831x_wdt_stop(struct watchdog_device *wdt_dev) { struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev); struct wm831x *wm831x = driver_data->wm831x; int ret; mutex_lock(&driver_data->lock); ret = wm831x_reg_unlock(wm831x); if (ret == 0) { ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG, WM831X_WDOG_ENA, 0); wm831x_reg_lock(wm831x); } else { dev_err(wm831x->dev, "Failed to unlock security key: %d\n", ret); } mutex_unlock(&driver_data->lock); return ret; } static int wm831x_wdt_ping(struct watchdog_device *wdt_dev) { struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev); struct wm831x *wm831x = driver_data->wm831x; int ret; u16 reg; mutex_lock(&driver_data->lock); if (driver_data->update_gpio) { gpio_set_value_cansleep(driver_data->update_gpio, driver_data->update_state); driver_data->update_state = !driver_data->update_state; ret = 0; goto out; } reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG); if (!(reg & WM831X_WDOG_RST_SRC)) { dev_err(wm831x->dev, "Hardware watchdog update unsupported\n"); ret = -EINVAL; goto out; } reg |= WM831X_WDOG_RESET; ret = wm831x_reg_unlock(wm831x); if (ret == 0) { ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg); wm831x_reg_lock(wm831x); } else { 
dev_err(wm831x->dev, "Failed to unlock security key: %d\n", ret); } out: mutex_unlock(&driver_data->lock); return ret; } static int wm831x_wdt_set_timeout(struct watchdog_device *wdt_dev, unsigned int timeout) { struct wm831x_wdt_drvdata *driver_data = watchdog_get_drvdata(wdt_dev); struct wm831x *wm831x = driver_data->wm831x; int ret, i; for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++) if (wm831x_wdt_cfgs[i].time == timeout) break; if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) return -EINVAL; ret = wm831x_reg_unlock(wm831x); if (ret == 0) { ret = wm831x_set_bits(wm831x, WM831X_WATCHDOG, WM831X_WDOG_TO_MASK, wm831x_wdt_cfgs[i].val); wm831x_reg_lock(wm831x); } else { dev_err(wm831x->dev, "Failed to unlock security key: %d\n", ret); } wdt_dev->timeout = timeout; return ret; } static const struct watchdog_info wm831x_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "WM831x Watchdog", }; static const struct watchdog_ops wm831x_wdt_ops = { .owner = THIS_MODULE, .start = wm831x_wdt_start, .stop = wm831x_wdt_stop, .ping = wm831x_wdt_ping, .set_timeout = wm831x_wdt_set_timeout, }; static int __devinit wm831x_wdt_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *chip_pdata; struct wm831x_watchdog_pdata *pdata; struct wm831x_wdt_drvdata *driver_data; struct watchdog_device *wm831x_wdt; int reg, ret, i; ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG); if (ret < 0) { dev_err(wm831x->dev, "Failed to read watchdog status: %d\n", ret); goto err; } reg = ret; if (reg & WM831X_WDOG_DEBUG) dev_warn(wm831x->dev, "Watchdog is paused\n"); driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data), GFP_KERNEL); if (!driver_data) { dev_err(wm831x->dev, "Unable to alloacate watchdog device\n"); ret = -ENOMEM; goto err; } mutex_init(&driver_data->lock); driver_data->wm831x = wm831x; wm831x_wdt = &driver_data->wdt; wm831x_wdt->info = &wm831x_wdt_info; wm831x_wdt->ops = &wm831x_wdt_ops; 
watchdog_set_nowayout(wm831x_wdt, nowayout); watchdog_set_drvdata(wm831x_wdt, driver_data); reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG); reg &= WM831X_WDOG_TO_MASK; for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++) if (wm831x_wdt_cfgs[i].val == reg) break; if (i == ARRAY_SIZE(wm831x_wdt_cfgs)) dev_warn(wm831x->dev, "Unknown watchdog timeout: %x\n", reg); else wm831x_wdt->timeout = wm831x_wdt_cfgs[i].time; /* Apply any configuration */ if (pdev->dev.parent->platform_data) { chip_pdata = pdev->dev.parent->platform_data; pdata = chip_pdata->watchdog; } else { pdata = NULL; } if (pdata) { reg &= ~(WM831X_WDOG_SECACT_MASK | WM831X_WDOG_PRIMACT_MASK | WM831X_WDOG_RST_SRC); reg |= pdata->primary << WM831X_WDOG_PRIMACT_SHIFT; reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT; reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT; if (pdata->update_gpio) { ret = gpio_request(pdata->update_gpio, "Watchdog update"); if (ret < 0) { dev_err(wm831x->dev, "Failed to request update GPIO: %d\n", ret); goto err; } ret = gpio_direction_output(pdata->update_gpio, 0); if (ret != 0) { dev_err(wm831x->dev, "gpio_direction_output returned: %d\n", ret); goto err_gpio; } driver_data->update_gpio = pdata->update_gpio; /* Make sure the watchdog takes hardware updates */ reg |= WM831X_WDOG_RST_SRC; } ret = wm831x_reg_unlock(wm831x); if (ret == 0) { ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg); wm831x_reg_lock(wm831x); } else { dev_err(wm831x->dev, "Failed to unlock security key: %d\n", ret); goto err_gpio; } } ret = watchdog_register_device(&driver_data->wdt); if (ret != 0) { dev_err(wm831x->dev, "watchdog_register_device() failed: %d\n", ret); goto err_gpio; } dev_set_drvdata(&pdev->dev, driver_data); return 0; err_gpio: if (driver_data->update_gpio) gpio_free(driver_data->update_gpio); err: return ret; } static int __devexit wm831x_wdt_remove(struct platform_device *pdev) { struct wm831x_wdt_drvdata *driver_data = dev_get_drvdata(&pdev->dev); 
watchdog_unregister_device(&driver_data->wdt); if (driver_data->update_gpio) gpio_free(driver_data->update_gpio); return 0; } static struct platform_driver wm831x_wdt_driver = { .probe = wm831x_wdt_probe, .remove = __devexit_p(wm831x_wdt_remove), .driver = { .name = "wm831x-watchdog", }, }; module_platform_driver(wm831x_wdt_driver); MODULE_AUTHOR("Mark Brown"); MODULE_DESCRIPTION("WM831x Watchdog"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm831x-watchdog");
gpl-2.0
DevSwift/Kernel-3.4-NovaThor
arch/mips/alchemy/common/prom.c
12228
3438
/* * * BRIEF MODULE DESCRIPTION * PROM library initialisation code, supports YAMON and U-Boot. * * Copyright 2000-2001, 2006, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <source@mvista.com> * * This file was derived from Carsten Langgaard's * arch/mips/mips-boards/xx files. * * Carsten Langgaard, carstenl@mips.com * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/string.h> #include <asm/bootinfo.h> int prom_argc; char **prom_argv; char **prom_envp; void __init prom_init_cmdline(void) { int i; for (i = 1; i < prom_argc; i++) { strlcat(arcs_cmdline, prom_argv[i], COMMAND_LINE_SIZE); if (i < (prom_argc - 1)) strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); } } char *prom_getenv(char *envname) { /* * Return a pointer to the given environment variable. * YAMON uses "name", "value" pairs, while U-Boot uses "name=value". */ char **env = prom_envp; int i = strlen(envname); int yamon = (*env && strchr(*env, '=') == NULL); while (*env) { if (yamon) { if (strcmp(envname, *env++) == 0) return *env; } else if (strncmp(envname, *env, i) == 0 && (*env)[i] == '=') return *env + i + 1; env++; } return NULL; } static inline unsigned char str2hexnum(unsigned char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return 0; /* foo */ } static inline void str2eaddr(unsigned char *ea, unsigned char *str) { int i; for (i = 0; i < 6; i++) { unsigned char num; if ((*str == '.') || (*str == ':')) str++; num = str2hexnum(*str++) << 4; num |= str2hexnum(*str++); ea[i] = num; } } int __init prom_get_ethernet_addr(char *ethernet_addr) { char *ethaddr_str; /* Check the environment variables first */ ethaddr_str = prom_getenv("ethaddr"); if (!ethaddr_str) { /* Check command line */ ethaddr_str = strstr(arcs_cmdline, "ethaddr="); if (!ethaddr_str) return -1; ethaddr_str += strlen("ethaddr="); } str2eaddr(ethernet_addr, ethaddr_str); return 0; } void __init prom_free_prom_memory(void) { }
gpl-2.0
wangyikai/linux
sound/soc/codecs/wm5110.c
197
71880
/* * wm5110.c -- WM5110 ALSA SoC Audio driver * * Copyright 2012 Wolfson Microelectronics plc * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/jack.h> #include <sound/initval.h> #include <sound/tlv.h> #include <linux/mfd/arizona/core.h> #include <linux/mfd/arizona/registers.h> #include "arizona.h" #include "wm_adsp.h" #include "wm5110.h" #define WM5110_NUM_ADSP 4 struct wm5110_priv { struct arizona_priv core; struct arizona_fll fll[2]; }; static const struct wm_adsp_region wm5110_dsp1_regions[] = { { .type = WMFW_ADSP2_PM, .base = 0x100000 }, { .type = WMFW_ADSP2_ZM, .base = 0x180000 }, { .type = WMFW_ADSP2_XM, .base = 0x190000 }, { .type = WMFW_ADSP2_YM, .base = 0x1a8000 }, }; static const struct wm_adsp_region wm5110_dsp2_regions[] = { { .type = WMFW_ADSP2_PM, .base = 0x200000 }, { .type = WMFW_ADSP2_ZM, .base = 0x280000 }, { .type = WMFW_ADSP2_XM, .base = 0x290000 }, { .type = WMFW_ADSP2_YM, .base = 0x2a8000 }, }; static const struct wm_adsp_region wm5110_dsp3_regions[] = { { .type = WMFW_ADSP2_PM, .base = 0x300000 }, { .type = WMFW_ADSP2_ZM, .base = 0x380000 }, { .type = WMFW_ADSP2_XM, .base = 0x390000 }, { .type = WMFW_ADSP2_YM, .base = 0x3a8000 }, }; static const struct wm_adsp_region wm5110_dsp4_regions[] = { { .type = WMFW_ADSP2_PM, .base = 0x400000 }, { .type = WMFW_ADSP2_ZM, .base = 0x480000 }, { .type = WMFW_ADSP2_XM, .base = 0x490000 }, { .type = WMFW_ADSP2_YM, .base = 0x4a8000 }, }; static const struct wm_adsp_region 
*wm5110_dsp_regions[] = { wm5110_dsp1_regions, wm5110_dsp2_regions, wm5110_dsp3_regions, wm5110_dsp4_regions, }; static const struct reg_default wm5110_sysclk_revd_patch[] = { { 0x3093, 0x1001 }, { 0x30E3, 0x1301 }, { 0x3133, 0x1201 }, { 0x3183, 0x1501 }, { 0x31D3, 0x1401 }, { 0x0049, 0x01ea }, { 0x004a, 0x01f2 }, { 0x0057, 0x01e7 }, { 0x0058, 0x01fb }, { 0x33ce, 0xc4f5 }, { 0x33cf, 0x1361 }, { 0x33d0, 0x0402 }, { 0x33d1, 0x4700 }, { 0x33d2, 0x026d }, { 0x33d3, 0xff00 }, { 0x33d4, 0x026d }, { 0x33d5, 0x0101 }, { 0x33d6, 0xc4f5 }, { 0x33d7, 0x0361 }, { 0x33d8, 0x0402 }, { 0x33d9, 0x6701 }, { 0x33da, 0xc4f5 }, { 0x33db, 0x136f }, { 0x33dc, 0xc4f5 }, { 0x33dd, 0x134f }, { 0x33de, 0xc4f5 }, { 0x33df, 0x131f }, { 0x33e0, 0x026d }, { 0x33e1, 0x4f01 }, { 0x33e2, 0x026d }, { 0x33e3, 0xf100 }, { 0x33e4, 0x026d }, { 0x33e5, 0x0001 }, { 0x33e6, 0xc4f5 }, { 0x33e7, 0x0361 }, { 0x33e8, 0x0402 }, { 0x33e9, 0x6601 }, { 0x33ea, 0xc4f5 }, { 0x33eb, 0x136f }, { 0x33ec, 0xc4f5 }, { 0x33ed, 0x134f }, { 0x33ee, 0xc4f5 }, { 0x33ef, 0x131f }, { 0x33f0, 0x026d }, { 0x33f1, 0x4e01 }, { 0x33f2, 0x026d }, { 0x33f3, 0xf000 }, { 0x33f6, 0xc4f5 }, { 0x33f7, 0x1361 }, { 0x33f8, 0x0402 }, { 0x33f9, 0x4600 }, { 0x33fa, 0x026d }, { 0x33fb, 0xfe00 }, }; static const struct reg_default wm5110_sysclk_reve_patch[] = { { 0x3270, 0xE410 }, { 0x3271, 0x3078 }, { 0x3272, 0xE410 }, { 0x3273, 0x3070 }, { 0x3274, 0xE410 }, { 0x3275, 0x3066 }, { 0x3276, 0xE410 }, { 0x3277, 0x3056 }, { 0x327A, 0xE414 }, { 0x327B, 0x3078 }, { 0x327C, 0xE414 }, { 0x327D, 0x3070 }, { 0x327E, 0xE414 }, { 0x327F, 0x3066 }, { 0x3280, 0xE414 }, { 0x3281, 0x3056 }, }; static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct arizona *arizona = dev_get_drvdata(codec->dev->parent); struct regmap *regmap = arizona->regmap; const struct reg_default *patch = NULL; int i, patch_size; switch (arizona->rev) { case 3: patch = 
wm5110_sysclk_revd_patch; patch_size = ARRAY_SIZE(wm5110_sysclk_revd_patch); break; default: patch = wm5110_sysclk_reve_patch; patch_size = ARRAY_SIZE(wm5110_sysclk_reve_patch); break; } switch (event) { case SND_SOC_DAPM_POST_PMU: if (patch) for (i = 0; i < patch_size; i++) regmap_write_async(regmap, patch[i].reg, patch[i].def); break; default: break; } return 0; } static const struct reg_sequence wm5110_no_dre_left_enable[] = { { 0x3024, 0xE410 }, { 0x3025, 0x0056 }, { 0x301B, 0x0224 }, { 0x301F, 0x4263 }, { 0x3021, 0x5291 }, { 0x3030, 0xE410 }, { 0x3031, 0x3066 }, { 0x3032, 0xE410 }, { 0x3033, 0x3070 }, { 0x3034, 0xE410 }, { 0x3035, 0x3078 }, { 0x3036, 0xE410 }, { 0x3037, 0x3080 }, { 0x3038, 0xE410 }, { 0x3039, 0x3080 }, }; static const struct reg_sequence wm5110_dre_left_enable[] = { { 0x3024, 0x0231 }, { 0x3025, 0x0B00 }, { 0x301B, 0x0227 }, { 0x301F, 0x4266 }, { 0x3021, 0x5294 }, { 0x3030, 0xE231 }, { 0x3031, 0x0266 }, { 0x3032, 0x8231 }, { 0x3033, 0x4B15 }, { 0x3034, 0x8231 }, { 0x3035, 0x0B15 }, { 0x3036, 0xE231 }, { 0x3037, 0x5294 }, { 0x3038, 0x0231 }, { 0x3039, 0x0B00 }, }; static const struct reg_sequence wm5110_no_dre_right_enable[] = { { 0x3074, 0xE414 }, { 0x3075, 0x0056 }, { 0x306B, 0x0224 }, { 0x306F, 0x4263 }, { 0x3071, 0x5291 }, { 0x3080, 0xE414 }, { 0x3081, 0x3066 }, { 0x3082, 0xE414 }, { 0x3083, 0x3070 }, { 0x3084, 0xE414 }, { 0x3085, 0x3078 }, { 0x3086, 0xE414 }, { 0x3087, 0x3080 }, { 0x3088, 0xE414 }, { 0x3089, 0x3080 }, }; static const struct reg_sequence wm5110_dre_right_enable[] = { { 0x3074, 0x0231 }, { 0x3075, 0x0B00 }, { 0x306B, 0x0227 }, { 0x306F, 0x4266 }, { 0x3071, 0x5294 }, { 0x3080, 0xE231 }, { 0x3081, 0x0266 }, { 0x3082, 0x8231 }, { 0x3083, 0x4B17 }, { 0x3084, 0x8231 }, { 0x3085, 0x0B17 }, { 0x3086, 0xE231 }, { 0x3087, 0x5294 }, { 0x3088, 0x0231 }, { 0x3089, 0x0B00 }, }; static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w) { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct arizona_priv *priv = 
snd_soc_codec_get_drvdata(codec); struct arizona *arizona = priv->arizona; unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE); const struct reg_sequence *wseq; int nregs; switch (w->shift) { case ARIZONA_OUT1L_ENA_SHIFT: if (val & ARIZONA_DRE1L_ENA_MASK) { wseq = wm5110_dre_left_enable; nregs = ARRAY_SIZE(wm5110_dre_left_enable); } else { wseq = wm5110_no_dre_left_enable; nregs = ARRAY_SIZE(wm5110_no_dre_left_enable); priv->out_up_delay += 10; } break; case ARIZONA_OUT1R_ENA_SHIFT: if (val & ARIZONA_DRE1R_ENA_MASK) { wseq = wm5110_dre_right_enable; nregs = ARRAY_SIZE(wm5110_dre_right_enable); } else { wseq = wm5110_no_dre_right_enable; nregs = ARRAY_SIZE(wm5110_no_dre_right_enable); priv->out_up_delay += 10; } break; default: return 0; } return regmap_multi_reg_write(arizona->regmap, wseq, nregs); } static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w) { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec); unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE); switch (w->shift) { case ARIZONA_OUT1L_ENA_SHIFT: if (!(val & ARIZONA_DRE1L_ENA_MASK)) { snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG1, ARIZONA_WS_TRG1); snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG1, 0); priv->out_down_delay += 27; } break; case ARIZONA_OUT1R_ENA_SHIFT: if (!(val & ARIZONA_DRE1R_ENA_MASK)) { snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG2, ARIZONA_WS_TRG2); snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG2, 0); priv->out_down_delay += 27; } break; default: break; } return 0; } static int wm5110_hp_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec); switch (priv->arizona->rev) { case 0 ... 
3: break; default: switch (event) { case SND_SOC_DAPM_PRE_PMU: wm5110_hp_pre_enable(w); break; case SND_SOC_DAPM_PRE_PMD: wm5110_hp_pre_disable(w); break; default: break; } break; } return arizona_hp_ev(w, kcontrol, event); } static int wm5110_clear_pga_volume(struct arizona *arizona, int output) { struct reg_sequence clear_pga = { ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4, 0x80 }; int ret; ret = regmap_multi_reg_write_bypassed(arizona->regmap, &clear_pga, 1); if (ret) dev_err(arizona->dev, "Failed to clear PGA (0x%x): %d\n", clear_pga.reg, ret); return ret; } static int wm5110_put_dre(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); struct arizona *arizona = dev_get_drvdata(codec->dev->parent); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; unsigned int ena, dre; unsigned int mask = (0x1 << mc->shift) | (0x1 << mc->rshift); unsigned int lnew = (!!ucontrol->value.integer.value[0]) << mc->shift; unsigned int rnew = (!!ucontrol->value.integer.value[1]) << mc->rshift; unsigned int lold, rold; unsigned int lena, rena; int ret; snd_soc_dapm_mutex_lock(dapm); ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &ena); if (ret) { dev_err(arizona->dev, "Failed to read output state: %d\n", ret); goto err; } ret = regmap_read(arizona->regmap, ARIZONA_DRE_ENABLE, &dre); if (ret) { dev_err(arizona->dev, "Failed to read DRE state: %d\n", ret); goto err; } lold = dre & (1 << mc->shift); rold = dre & (1 << mc->rshift); /* Enables are channel wise swapped from the DRE enables */ lena = ena & (1 << mc->rshift); rena = ena & (1 << mc->shift); if ((lena && lnew != lold) || (rena && rnew != rold)) { dev_err(arizona->dev, "Can't change DRE on active outputs\n"); ret = -EBUSY; goto err; } ret = regmap_update_bits(arizona->regmap, ARIZONA_DRE_ENABLE, mask, lnew | rnew); if (ret) { 
dev_err(arizona->dev, "Failed to set DRE: %d\n", ret); goto err; } /* Force reset of PGA volumes, if turning DRE off */ if (!lnew && lold) wm5110_clear_pga_volume(arizona, mc->shift); if (!rnew && rold) wm5110_clear_pga_volume(arizona, mc->rshift); err: snd_soc_dapm_mutex_unlock(dapm); return ret; } static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0); static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0); static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0); static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0); #define WM5110_NG_SRC(name, base) \ SOC_SINGLE(name " NG HPOUT1L Switch", base, 0, 1, 0), \ SOC_SINGLE(name " NG HPOUT1R Switch", base, 1, 1, 0), \ SOC_SINGLE(name " NG HPOUT2L Switch", base, 2, 1, 0), \ SOC_SINGLE(name " NG HPOUT2R Switch", base, 3, 1, 0), \ SOC_SINGLE(name " NG HPOUT3L Switch", base, 4, 1, 0), \ SOC_SINGLE(name " NG HPOUT3R Switch", base, 5, 1, 0), \ SOC_SINGLE(name " NG SPKOUTL Switch", base, 6, 1, 0), \ SOC_SINGLE(name " NG SPKOUTR Switch", base, 7, 1, 0), \ SOC_SINGLE(name " NG SPKDAT1L Switch", base, 8, 1, 0), \ SOC_SINGLE(name " NG SPKDAT1R Switch", base, 9, 1, 0), \ SOC_SINGLE(name " NG SPKDAT2L Switch", base, 10, 1, 0), \ SOC_SINGLE(name " NG SPKDAT2R Switch", base, 11, 1, 0) static const struct snd_kcontrol_new wm5110_snd_controls[] = { SOC_ENUM("IN1 OSR", arizona_in_dmic_osr[0]), SOC_ENUM("IN2 OSR", arizona_in_dmic_osr[1]), SOC_ENUM("IN3 OSR", arizona_in_dmic_osr[2]), SOC_ENUM("IN4 OSR", arizona_in_dmic_osr[3]), SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL, ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_RANGE_TLV("IN1R Volume", ARIZONA_IN1R_CONTROL, ARIZONA_IN1R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_RANGE_TLV("IN2L Volume", ARIZONA_IN2L_CONTROL, ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_RANGE_TLV("IN2R Volume", ARIZONA_IN2R_CONTROL, ARIZONA_IN2R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_RANGE_TLV("IN3L 
Volume", ARIZONA_IN3L_CONTROL, ARIZONA_IN3L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_SINGLE_RANGE_TLV("IN3R Volume", ARIZONA_IN3R_CONTROL, ARIZONA_IN3R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv), SOC_ENUM("IN HPF Cutoff Frequency", arizona_in_hpf_cut_enum), SOC_SINGLE("IN1L HPF Switch", ARIZONA_IN1L_CONTROL, ARIZONA_IN1L_HPF_SHIFT, 1, 0), SOC_SINGLE("IN1R HPF Switch", ARIZONA_IN1R_CONTROL, ARIZONA_IN1R_HPF_SHIFT, 1, 0), SOC_SINGLE("IN2L HPF Switch", ARIZONA_IN2L_CONTROL, ARIZONA_IN2L_HPF_SHIFT, 1, 0), SOC_SINGLE("IN2R HPF Switch", ARIZONA_IN2R_CONTROL, ARIZONA_IN2R_HPF_SHIFT, 1, 0), SOC_SINGLE("IN3L HPF Switch", ARIZONA_IN3L_CONTROL, ARIZONA_IN3L_HPF_SHIFT, 1, 0), SOC_SINGLE("IN3R HPF Switch", ARIZONA_IN3R_CONTROL, ARIZONA_IN3R_HPF_SHIFT, 1, 0), SOC_SINGLE("IN4L HPF Switch", ARIZONA_IN4L_CONTROL, ARIZONA_IN4L_HPF_SHIFT, 1, 0), SOC_SINGLE("IN4R HPF Switch", ARIZONA_IN4R_CONTROL, ARIZONA_IN4R_HPF_SHIFT, 1, 0), SOC_SINGLE_TLV("IN1L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1L, ARIZONA_IN1L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN1R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1R, ARIZONA_IN1R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN2L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2L, ARIZONA_IN2L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN2R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2R, ARIZONA_IN2R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN3L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_3L, ARIZONA_IN3L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN3R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_3R, ARIZONA_IN3R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN4L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_4L, ARIZONA_IN4L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_SINGLE_TLV("IN4R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_4R, ARIZONA_IN4R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_ENUM("Input Ramp Up", arizona_in_vi_ramp), SOC_ENUM("Input Ramp Down", arizona_in_vd_ramp), 
ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE), ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2), SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B3 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2), SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B3 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2), SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B3 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2), SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT, 24, 0, eq_tlv), 
SOC_SINGLE_TLV("EQ4 B3 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B3_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 B4 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B4_GAIN_SHIFT, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT, 24, 0, eq_tlv), ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE), SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5, ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA), SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5, ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA), ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE), ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2), ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2), ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2), ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2), SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode), SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode), SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode), SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode), SOC_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]), SOC_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]), SOC_ENUM("ISRC3 FSL", arizona_isrc_fsl[2]), SOC_ENUM("ISRC1 FSH", arizona_isrc_fsh[0]), SOC_ENUM("ISRC2 FSH", arizona_isrc_fsh[1]), SOC_ENUM("ISRC3 FSH", arizona_isrc_fsh[2]), SOC_ENUM("ASRC RATE 1", arizona_asrc_rate1), ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP2L", ARIZONA_DSP2LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP2R", ARIZONA_DSP2RMIX_INPUT_1_SOURCE), 
ARIZONA_MIXER_CONTROLS("DSP3L", ARIZONA_DSP3LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP3R", ARIZONA_DSP3RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP4L", ARIZONA_DSP4LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DSP4R", ARIZONA_DSP4RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE), SOC_SINGLE_TLV("Noise Generator Volume", ARIZONA_COMFORT_NOISE_GENERATOR, ARIZONA_NOISE_GEN_GAIN_SHIFT, 0x16, 0, noise_tlv), ARIZONA_MIXER_CONTROLS("HPOUT1L", ARIZONA_OUT1LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("HPOUT1R", ARIZONA_OUT1RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("HPOUT2L", ARIZONA_OUT2LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("HPOUT2R", ARIZONA_OUT2RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("HPOUT3L", ARIZONA_OUT3LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("HPOUT3R", ARIZONA_OUT3RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKOUTL", ARIZONA_OUT4LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKOUTR", ARIZONA_OUT4RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKDAT2L", ARIZONA_OUT6LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE), SOC_SINGLE("HPOUT1 SC Protect Switch", ARIZONA_HP1_SHORT_CIRCUIT_CTRL, ARIZONA_HP1_SC_ENA_SHIFT, 1, 0), SOC_SINGLE("HPOUT2 SC Protect Switch", ARIZONA_HP2_SHORT_CIRCUIT_CTRL, ARIZONA_HP2_SC_ENA_SHIFT, 1, 0), SOC_SINGLE("HPOUT3 SC Protect Switch", ARIZONA_HP3_SHORT_CIRCUIT_CTRL, ARIZONA_HP3_SC_ENA_SHIFT, 1, 0), SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L, ARIZONA_OUT5_OSR_SHIFT, 1, 0), SOC_SINGLE("SPKDAT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_6L, ARIZONA_OUT6_OSR_SHIFT, 1, 0), SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L, ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1), 
SOC_DOUBLE_R("HPOUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L, ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R("HPOUT3 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L, ARIZONA_DAC_DIGITAL_VOLUME_3R, ARIZONA_OUT3L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L, ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L, ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R("SPKDAT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_6L, ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_MUTE_SHIFT, 1, 1), SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L, ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_DOUBLE_R_TLV("HPOUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L, ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_DOUBLE_R_TLV("HPOUT3 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L, ARIZONA_DAC_DIGITAL_VOLUME_3R, ARIZONA_OUT3L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_DOUBLE_R_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L, ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L, ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_DOUBLE_R_TLV("SPKDAT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_6L, ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_VOL_SHIFT, 0xbf, 0, digital_tlv), SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT, ARIZONA_SPK1R_MUTE_SHIFT, 1, 1), SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT, ARIZONA_SPK2R_MUTE_SHIFT, 1, 1), SOC_DOUBLE_EXT("HPOUT1 DRE Switch", ARIZONA_DRE_ENABLE, ARIZONA_DRE1L_ENA_SHIFT, ARIZONA_DRE1R_ENA_SHIFT, 1, 0, snd_soc_get_volsw, wm5110_put_dre), SOC_DOUBLE_EXT("HPOUT2 DRE Switch", 
ARIZONA_DRE_ENABLE, ARIZONA_DRE2L_ENA_SHIFT, ARIZONA_DRE2R_ENA_SHIFT, 1, 0, snd_soc_get_volsw, wm5110_put_dre), SOC_DOUBLE_EXT("HPOUT3 DRE Switch", ARIZONA_DRE_ENABLE, ARIZONA_DRE3L_ENA_SHIFT, ARIZONA_DRE3R_ENA_SHIFT, 1, 0, snd_soc_get_volsw, wm5110_put_dre), SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp), SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp), SOC_SINGLE("Noise Gate Switch", ARIZONA_NOISE_GATE_CONTROL, ARIZONA_NGATE_ENA_SHIFT, 1, 0), SOC_SINGLE_TLV("Noise Gate Threshold Volume", ARIZONA_NOISE_GATE_CONTROL, ARIZONA_NGATE_THR_SHIFT, 7, 1, ng_tlv), SOC_ENUM("Noise Gate Hold", arizona_ng_hold), WM5110_NG_SRC("HPOUT1L", ARIZONA_NOISE_GATE_SELECT_1L), WM5110_NG_SRC("HPOUT1R", ARIZONA_NOISE_GATE_SELECT_1R), WM5110_NG_SRC("HPOUT2L", ARIZONA_NOISE_GATE_SELECT_2L), WM5110_NG_SRC("HPOUT2R", ARIZONA_NOISE_GATE_SELECT_2R), WM5110_NG_SRC("HPOUT3L", ARIZONA_NOISE_GATE_SELECT_3L), WM5110_NG_SRC("HPOUT3R", ARIZONA_NOISE_GATE_SELECT_3R), WM5110_NG_SRC("SPKOUTL", ARIZONA_NOISE_GATE_SELECT_4L), WM5110_NG_SRC("SPKOUTR", ARIZONA_NOISE_GATE_SELECT_4R), WM5110_NG_SRC("SPKDAT1L", ARIZONA_NOISE_GATE_SELECT_5L), WM5110_NG_SRC("SPKDAT1R", ARIZONA_NOISE_GATE_SELECT_5R), WM5110_NG_SRC("SPKDAT2L", ARIZONA_NOISE_GATE_SELECT_6L), WM5110_NG_SRC("SPKDAT2R", ARIZONA_NOISE_GATE_SELECT_6R), ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX4", ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX5", ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX6", ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX7", ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF1TX8", ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF2TX1", ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF2TX2", ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE), 
ARIZONA_MIXER_CONTROLS("AIF2TX3", ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF2TX4", ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF2TX5", ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF2TX6", ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX1", ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX2", ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX3", ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX4", ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX5", ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX6", ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX7", ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("SLIMTX8", ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE), }; ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF3, ARIZONA_HPLP3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF4, ARIZONA_HPLP4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP1L, ARIZONA_DSP1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP1R, ARIZONA_DSP1RMIX_INPUT_1_SOURCE); ARIZONA_DSP_AUX_ENUMS(DSP1, ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP2L, ARIZONA_DSP2LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP2R, 
ARIZONA_DSP2RMIX_INPUT_1_SOURCE); ARIZONA_DSP_AUX_ENUMS(DSP2, ARIZONA_DSP2AUX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP3L, ARIZONA_DSP3LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP3R, ARIZONA_DSP3RMIX_INPUT_1_SOURCE); ARIZONA_DSP_AUX_ENUMS(DSP3, ARIZONA_DSP3AUX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP4L, ARIZONA_DSP4LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DSP4R, ARIZONA_DSP4RMIX_INPUT_1_SOURCE); ARIZONA_DSP_AUX_ENUMS(DSP4, ARIZONA_DSP4AUX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(Mic, ARIZONA_MICMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(Noise, ARIZONA_NOISEMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(PWM1, ARIZONA_PWM1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(PWM2, ARIZONA_PWM2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT1L, ARIZONA_OUT1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT1R, ARIZONA_OUT1RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT2L, ARIZONA_OUT2LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT2R, ARIZONA_OUT2RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT3L, ARIZONA_OUT3LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(OUT3R, ARIZONA_OUT3RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKOUTL, ARIZONA_OUT4LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKOUTR, ARIZONA_OUT4RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT1L, ARIZONA_OUT5LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT1R, ARIZONA_OUT5RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT2L, ARIZONA_OUT6LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SPKDAT2R, ARIZONA_OUT6RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX1, ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX2, ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX3, ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX4, ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX5, ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX6, ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX7, ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF1TX8, ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE); 
ARIZONA_MIXER_ENUMS(AIF2TX1, ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX3, ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX4, ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX5, ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF2TX6, ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF3TX1, ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(AIF3TX2, ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX1, ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX2, ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX3, ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX4, ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX5, ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX6, ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX7, ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(SLIMTX8, ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ASRC2R, ARIZONA_ASRC2RMIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1INT1, ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1INT2, ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1INT3, ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1INT4, ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1DEC1, ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1DEC2, ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1DEC3, ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC1DEC4, ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2INT1, ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2INT2, ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2INT3, 
ARIZONA_ISRC2INT3MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2INT4, ARIZONA_ISRC2INT4MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2DEC1, ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2DEC2, ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2DEC3, ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC2DEC4, ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3INT1, ARIZONA_ISRC3INT1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3INT2, ARIZONA_ISRC3INT2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3INT3, ARIZONA_ISRC3INT3MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3INT4, ARIZONA_ISRC3INT4MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3DEC1, ARIZONA_ISRC3DEC1MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3DEC2, ARIZONA_ISRC3DEC2MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3DEC3, ARIZONA_ISRC3DEC3MIX_INPUT_1_SOURCE); ARIZONA_MUX_ENUMS(ISRC3DEC4, ARIZONA_ISRC3DEC4MIX_INPUT_1_SOURCE); static const char *wm5110_aec_loopback_texts[] = { "HPOUT1L", "HPOUT1R", "HPOUT2L", "HPOUT2R", "HPOUT3L", "HPOUT3R", "SPKOUTL", "SPKOUTR", "SPKDAT1L", "SPKDAT1R", "SPKDAT2L", "SPKDAT2R", }; static const unsigned int wm5110_aec_loopback_values[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, }; static const struct soc_enum wm5110_aec_loopback = SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1, ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf, ARRAY_SIZE(wm5110_aec_loopback_texts), wm5110_aec_loopback_texts, wm5110_aec_loopback_values); static const struct snd_kcontrol_new wm5110_aec_loopback_mux = SOC_DAPM_ENUM("AEC Loopback", wm5110_aec_loopback); static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT, 0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1, ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK, ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK, 
ARIZONA_OPCLK_ASYNC_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD3", 0, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0, SND_SOC_DAPM_REGULATOR_BYPASS), SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDL", 0, 0), SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDR", 0, 0), SND_SOC_DAPM_SIGGEN("TONE"), SND_SOC_DAPM_SIGGEN("NOISE"), SND_SOC_DAPM_SIGGEN("HAPTICS"), SND_SOC_DAPM_INPUT("IN1L"), SND_SOC_DAPM_INPUT("IN1R"), SND_SOC_DAPM_INPUT("IN2L"), SND_SOC_DAPM_INPUT("IN2R"), SND_SOC_DAPM_INPUT("IN3L"), SND_SOC_DAPM_INPUT("IN3R"), SND_SOC_DAPM_INPUT("IN4L"), SND_SOC_DAPM_INPUT("IN4R"), SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"), SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"), SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN3L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN3R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3R_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN4L PGA", 
ARIZONA_INPUT_ENABLES, ARIZONA_IN4L_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("IN4R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN4R_ENA_SHIFT, 0, NULL, 0, arizona_in_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1, ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2, ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3, ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR, ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Tone Generator 1", ARIZONA_TONE_GENERATOR_1, ARIZONA_TONE1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Tone Generator 2", ARIZONA_TONE_GENERATOR_1, ARIZONA_TONE2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("Mic Mute Mixer", ARIZONA_MIC_NOISE_MIX_CONTROL_1, ARIZONA_MICMUTE_MIX_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ1", ARIZONA_EQ1_1, ARIZONA_EQ1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ2", ARIZONA_EQ2_1, ARIZONA_EQ2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ3", ARIZONA_EQ3_1, ARIZONA_EQ3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("EQ4", ARIZONA_EQ4_1, ARIZONA_EQ4_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF2", ARIZONA_HPLPF2_1, ARIZONA_LHPF2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF3", ARIZONA_HPLPF3_1, ARIZONA_LHPF3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("LHPF4", 
ARIZONA_HPLPF4_1, ARIZONA_LHPF4_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("PWM1 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("PWM2 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC1L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC1R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1R_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC2L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0, NULL, 0), WM_ADSP2("DSP1", 0), WM_ADSP2("DSP2", 1), WM_ADSP2("DSP3", 2), WM_ADSP2("DSP4", 3), SND_SOC_DAPM_PGA("ISRC1INT1", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_INT0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1INT2", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_INT1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1INT3", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_INT2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1INT4", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_INT3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1DEC1", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_DEC0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1DEC2", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_DEC1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1DEC3", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_DEC2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC1DEC4", ARIZONA_ISRC_1_CTRL_3, ARIZONA_ISRC1_DEC3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2INT1", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_INT0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2INT2", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_INT1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2INT3", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_INT2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2INT4", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_INT3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2DEC1", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_DEC0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2DEC2", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_DEC1_ENA_SHIFT, 
0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2DEC3", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_DEC2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC2DEC4", ARIZONA_ISRC_2_CTRL_3, ARIZONA_ISRC2_DEC3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3INT1", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_INT0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3INT2", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_INT1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3INT3", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_INT2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3INT4", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_INT3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3DEC1", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_DEC0_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3DEC2", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_DEC1_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3DEC3", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_DEC2_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("ISRC3DEC4", ARIZONA_ISRC_3_CTRL_3, ARIZONA_ISRC3_DEC3_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1, ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0, &wm5110_aec_loopback_mux), SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0, ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX2", 
NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0, ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF2TX3", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF2TX4", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF2TX5", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF2TX6", NULL, 0, ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX3", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX4", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX5", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF2RX6", NULL, 0, ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, ARIZONA_SLIMRX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, 
ARIZONA_SLIMRX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, ARIZONA_SLIMRX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, ARIZONA_SLIMRX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, ARIZONA_SLIMRX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, ARIZONA_SLIMRX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, ARIZONA_SLIMRX7_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 0, ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE, ARIZONA_SLIMRX8_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX3_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX4_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX5_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX6_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX7_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 0, ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE, ARIZONA_SLIMTX8_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0, ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0, ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0, ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0), SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0, ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0), SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM, ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, 
wm5110_hp_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM, ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, wm5110_hp_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT2L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT2R", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT2R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT3R", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT3R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT5R", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT5R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT6L", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT6L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_PGA_E("OUT6R", ARIZONA_OUTPUT_ENABLES_1, ARIZONA_OUT6R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev, SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), ARIZONA_MIXER_WIDGETS(EQ1, "EQ1"), ARIZONA_MIXER_WIDGETS(EQ2, "EQ2"), ARIZONA_MIXER_WIDGETS(EQ3, "EQ3"), ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"), ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"), ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"), ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"), 
ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"), ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"), ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"), ARIZONA_MIXER_WIDGETS(LHPF3, "LHPF3"), ARIZONA_MIXER_WIDGETS(LHPF4, "LHPF4"), ARIZONA_MIXER_WIDGETS(Mic, "Mic"), ARIZONA_MIXER_WIDGETS(Noise, "Noise"), ARIZONA_MIXER_WIDGETS(PWM1, "PWM1"), ARIZONA_MIXER_WIDGETS(PWM2, "PWM2"), ARIZONA_MIXER_WIDGETS(OUT1L, "HPOUT1L"), ARIZONA_MIXER_WIDGETS(OUT1R, "HPOUT1R"), ARIZONA_MIXER_WIDGETS(OUT2L, "HPOUT2L"), ARIZONA_MIXER_WIDGETS(OUT2R, "HPOUT2R"), ARIZONA_MIXER_WIDGETS(OUT3L, "HPOUT3L"), ARIZONA_MIXER_WIDGETS(OUT3R, "HPOUT3R"), ARIZONA_MIXER_WIDGETS(SPKOUTL, "SPKOUTL"), ARIZONA_MIXER_WIDGETS(SPKOUTR, "SPKOUTR"), ARIZONA_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"), ARIZONA_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"), ARIZONA_MIXER_WIDGETS(SPKDAT2L, "SPKDAT2L"), ARIZONA_MIXER_WIDGETS(SPKDAT2R, "SPKDAT2R"), ARIZONA_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"), ARIZONA_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"), ARIZONA_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"), ARIZONA_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"), ARIZONA_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"), ARIZONA_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"), ARIZONA_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"), ARIZONA_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"), ARIZONA_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"), ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"), ARIZONA_MIXER_WIDGETS(AIF2TX3, "AIF2TX3"), ARIZONA_MIXER_WIDGETS(AIF2TX4, "AIF2TX4"), ARIZONA_MIXER_WIDGETS(AIF2TX5, "AIF2TX5"), ARIZONA_MIXER_WIDGETS(AIF2TX6, "AIF2TX6"), ARIZONA_MIXER_WIDGETS(AIF3TX1, "AIF3TX1"), ARIZONA_MIXER_WIDGETS(AIF3TX2, "AIF3TX2"), ARIZONA_MIXER_WIDGETS(SLIMTX1, "SLIMTX1"), ARIZONA_MIXER_WIDGETS(SLIMTX2, "SLIMTX2"), ARIZONA_MIXER_WIDGETS(SLIMTX3, "SLIMTX3"), ARIZONA_MIXER_WIDGETS(SLIMTX4, "SLIMTX4"), ARIZONA_MIXER_WIDGETS(SLIMTX5, "SLIMTX5"), ARIZONA_MIXER_WIDGETS(SLIMTX6, "SLIMTX6"), ARIZONA_MIXER_WIDGETS(SLIMTX7, "SLIMTX7"), ARIZONA_MIXER_WIDGETS(SLIMTX8, "SLIMTX8"), ARIZONA_MUX_WIDGETS(ASRC1L, "ASRC1L"), ARIZONA_MUX_WIDGETS(ASRC1R, "ASRC1R"), ARIZONA_MUX_WIDGETS(ASRC2L, 
"ASRC2L"), ARIZONA_MUX_WIDGETS(ASRC2R, "ASRC2R"), ARIZONA_DSP_WIDGETS(DSP1, "DSP1"), ARIZONA_DSP_WIDGETS(DSP2, "DSP2"), ARIZONA_DSP_WIDGETS(DSP3, "DSP3"), ARIZONA_DSP_WIDGETS(DSP4, "DSP4"), ARIZONA_MUX_WIDGETS(ISRC1DEC1, "ISRC1DEC1"), ARIZONA_MUX_WIDGETS(ISRC1DEC2, "ISRC1DEC2"), ARIZONA_MUX_WIDGETS(ISRC1DEC3, "ISRC1DEC3"), ARIZONA_MUX_WIDGETS(ISRC1DEC4, "ISRC1DEC4"), ARIZONA_MUX_WIDGETS(ISRC1INT1, "ISRC1INT1"), ARIZONA_MUX_WIDGETS(ISRC1INT2, "ISRC1INT2"), ARIZONA_MUX_WIDGETS(ISRC1INT3, "ISRC1INT3"), ARIZONA_MUX_WIDGETS(ISRC1INT4, "ISRC1INT4"), ARIZONA_MUX_WIDGETS(ISRC2DEC1, "ISRC2DEC1"), ARIZONA_MUX_WIDGETS(ISRC2DEC2, "ISRC2DEC2"), ARIZONA_MUX_WIDGETS(ISRC2DEC3, "ISRC2DEC3"), ARIZONA_MUX_WIDGETS(ISRC2DEC4, "ISRC2DEC4"), ARIZONA_MUX_WIDGETS(ISRC2INT1, "ISRC2INT1"), ARIZONA_MUX_WIDGETS(ISRC2INT2, "ISRC2INT2"), ARIZONA_MUX_WIDGETS(ISRC2INT3, "ISRC2INT3"), ARIZONA_MUX_WIDGETS(ISRC2INT4, "ISRC2INT4"), ARIZONA_MUX_WIDGETS(ISRC3DEC1, "ISRC3DEC1"), ARIZONA_MUX_WIDGETS(ISRC3DEC2, "ISRC3DEC2"), ARIZONA_MUX_WIDGETS(ISRC3DEC3, "ISRC3DEC3"), ARIZONA_MUX_WIDGETS(ISRC3DEC4, "ISRC3DEC4"), ARIZONA_MUX_WIDGETS(ISRC3INT1, "ISRC3INT1"), ARIZONA_MUX_WIDGETS(ISRC3INT2, "ISRC3INT2"), ARIZONA_MUX_WIDGETS(ISRC3INT3, "ISRC3INT3"), ARIZONA_MUX_WIDGETS(ISRC3INT4, "ISRC3INT4"), SND_SOC_DAPM_OUTPUT("HPOUT1L"), SND_SOC_DAPM_OUTPUT("HPOUT1R"), SND_SOC_DAPM_OUTPUT("HPOUT2L"), SND_SOC_DAPM_OUTPUT("HPOUT2R"), SND_SOC_DAPM_OUTPUT("HPOUT3L"), SND_SOC_DAPM_OUTPUT("HPOUT3R"), SND_SOC_DAPM_OUTPUT("SPKOUTLN"), SND_SOC_DAPM_OUTPUT("SPKOUTLP"), SND_SOC_DAPM_OUTPUT("SPKOUTRN"), SND_SOC_DAPM_OUTPUT("SPKOUTRP"), SND_SOC_DAPM_OUTPUT("SPKDAT1L"), SND_SOC_DAPM_OUTPUT("SPKDAT1R"), SND_SOC_DAPM_OUTPUT("SPKDAT2L"), SND_SOC_DAPM_OUTPUT("SPKDAT2R"), SND_SOC_DAPM_OUTPUT("MICSUPP"), }; #define ARIZONA_MIXER_INPUT_ROUTES(name) \ { name, "Noise Generator", "Noise Generator" }, \ { name, "Tone Generator 1", "Tone Generator 1" }, \ { name, "Tone Generator 2", "Tone Generator 2" }, \ { name, "Haptics", "HAPTICS" }, \ { name, 
"AEC", "AEC Loopback" }, \ { name, "IN1L", "IN1L PGA" }, \ { name, "IN1R", "IN1R PGA" }, \ { name, "IN2L", "IN2L PGA" }, \ { name, "IN2R", "IN2R PGA" }, \ { name, "IN3L", "IN3L PGA" }, \ { name, "IN3R", "IN3R PGA" }, \ { name, "IN4L", "IN4L PGA" }, \ { name, "IN4R", "IN4R PGA" }, \ { name, "Mic Mute Mixer", "Mic Mute Mixer" }, \ { name, "AIF1RX1", "AIF1RX1" }, \ { name, "AIF1RX2", "AIF1RX2" }, \ { name, "AIF1RX3", "AIF1RX3" }, \ { name, "AIF1RX4", "AIF1RX4" }, \ { name, "AIF1RX5", "AIF1RX5" }, \ { name, "AIF1RX6", "AIF1RX6" }, \ { name, "AIF1RX7", "AIF1RX7" }, \ { name, "AIF1RX8", "AIF1RX8" }, \ { name, "AIF2RX1", "AIF2RX1" }, \ { name, "AIF2RX2", "AIF2RX2" }, \ { name, "AIF2RX3", "AIF2RX3" }, \ { name, "AIF2RX4", "AIF2RX4" }, \ { name, "AIF2RX5", "AIF2RX5" }, \ { name, "AIF2RX6", "AIF2RX6" }, \ { name, "AIF3RX1", "AIF3RX1" }, \ { name, "AIF3RX2", "AIF3RX2" }, \ { name, "SLIMRX1", "SLIMRX1" }, \ { name, "SLIMRX2", "SLIMRX2" }, \ { name, "SLIMRX3", "SLIMRX3" }, \ { name, "SLIMRX4", "SLIMRX4" }, \ { name, "SLIMRX5", "SLIMRX5" }, \ { name, "SLIMRX6", "SLIMRX6" }, \ { name, "SLIMRX7", "SLIMRX7" }, \ { name, "SLIMRX8", "SLIMRX8" }, \ { name, "EQ1", "EQ1" }, \ { name, "EQ2", "EQ2" }, \ { name, "EQ3", "EQ3" }, \ { name, "EQ4", "EQ4" }, \ { name, "DRC1L", "DRC1L" }, \ { name, "DRC1R", "DRC1R" }, \ { name, "DRC2L", "DRC2L" }, \ { name, "DRC2R", "DRC2R" }, \ { name, "LHPF1", "LHPF1" }, \ { name, "LHPF2", "LHPF2" }, \ { name, "LHPF3", "LHPF3" }, \ { name, "LHPF4", "LHPF4" }, \ { name, "ASRC1L", "ASRC1L" }, \ { name, "ASRC1R", "ASRC1R" }, \ { name, "ASRC2L", "ASRC2L" }, \ { name, "ASRC2R", "ASRC2R" }, \ { name, "ISRC1DEC1", "ISRC1DEC1" }, \ { name, "ISRC1DEC2", "ISRC1DEC2" }, \ { name, "ISRC1DEC3", "ISRC1DEC3" }, \ { name, "ISRC1DEC4", "ISRC1DEC4" }, \ { name, "ISRC1INT1", "ISRC1INT1" }, \ { name, "ISRC1INT2", "ISRC1INT2" }, \ { name, "ISRC1INT3", "ISRC1INT3" }, \ { name, "ISRC1INT4", "ISRC1INT4" }, \ { name, "ISRC2DEC1", "ISRC2DEC1" }, \ { name, "ISRC2DEC2", "ISRC2DEC2" }, \ 
{ name, "ISRC2DEC3", "ISRC2DEC3" }, \ { name, "ISRC2DEC4", "ISRC2DEC4" }, \ { name, "ISRC2INT1", "ISRC2INT1" }, \ { name, "ISRC2INT2", "ISRC2INT2" }, \ { name, "ISRC2INT3", "ISRC2INT3" }, \ { name, "ISRC2INT4", "ISRC2INT4" }, \ { name, "ISRC3DEC1", "ISRC3DEC1" }, \ { name, "ISRC3DEC2", "ISRC3DEC2" }, \ { name, "ISRC3DEC3", "ISRC3DEC3" }, \ { name, "ISRC3DEC4", "ISRC3DEC4" }, \ { name, "ISRC3INT1", "ISRC3INT1" }, \ { name, "ISRC3INT2", "ISRC3INT2" }, \ { name, "ISRC3INT3", "ISRC3INT3" }, \ { name, "ISRC3INT4", "ISRC3INT4" }, \ { name, "DSP1.1", "DSP1" }, \ { name, "DSP1.2", "DSP1" }, \ { name, "DSP1.3", "DSP1" }, \ { name, "DSP1.4", "DSP1" }, \ { name, "DSP1.5", "DSP1" }, \ { name, "DSP1.6", "DSP1" }, \ { name, "DSP2.1", "DSP2" }, \ { name, "DSP2.2", "DSP2" }, \ { name, "DSP2.3", "DSP2" }, \ { name, "DSP2.4", "DSP2" }, \ { name, "DSP2.5", "DSP2" }, \ { name, "DSP2.6", "DSP2" }, \ { name, "DSP3.1", "DSP3" }, \ { name, "DSP3.2", "DSP3" }, \ { name, "DSP3.3", "DSP3" }, \ { name, "DSP3.4", "DSP3" }, \ { name, "DSP3.5", "DSP3" }, \ { name, "DSP3.6", "DSP3" }, \ { name, "DSP4.1", "DSP4" }, \ { name, "DSP4.2", "DSP4" }, \ { name, "DSP4.3", "DSP4" }, \ { name, "DSP4.4", "DSP4" }, \ { name, "DSP4.5", "DSP4" }, \ { name, "DSP4.6", "DSP4" } static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { { "AIF2 Capture", NULL, "DBVDD2" }, { "AIF2 Playback", NULL, "DBVDD2" }, { "AIF3 Capture", NULL, "DBVDD3" }, { "AIF3 Playback", NULL, "DBVDD3" }, { "OUT1L", NULL, "CPVDD" }, { "OUT1R", NULL, "CPVDD" }, { "OUT2L", NULL, "CPVDD" }, { "OUT2R", NULL, "CPVDD" }, { "OUT3L", NULL, "CPVDD" }, { "OUT3R", NULL, "CPVDD" }, { "OUT4L", NULL, "SPKVDDL" }, { "OUT4R", NULL, "SPKVDDR" }, { "OUT1L", NULL, "SYSCLK" }, { "OUT1R", NULL, "SYSCLK" }, { "OUT2L", NULL, "SYSCLK" }, { "OUT2R", NULL, "SYSCLK" }, { "OUT3L", NULL, "SYSCLK" }, { "OUT4L", NULL, "SYSCLK" }, { "OUT4R", NULL, "SYSCLK" }, { "OUT5L", NULL, "SYSCLK" }, { "OUT5R", NULL, "SYSCLK" }, { "OUT6L", NULL, "SYSCLK" }, { "OUT6R", NULL, 
"SYSCLK" }, { "IN1L", NULL, "SYSCLK" }, { "IN1R", NULL, "SYSCLK" }, { "IN2L", NULL, "SYSCLK" }, { "IN2R", NULL, "SYSCLK" }, { "IN3L", NULL, "SYSCLK" }, { "IN3R", NULL, "SYSCLK" }, { "IN4L", NULL, "SYSCLK" }, { "IN4R", NULL, "SYSCLK" }, { "MICBIAS1", NULL, "MICVDD" }, { "MICBIAS2", NULL, "MICVDD" }, { "MICBIAS3", NULL, "MICVDD" }, { "Noise Generator", NULL, "SYSCLK" }, { "Tone Generator 1", NULL, "SYSCLK" }, { "Tone Generator 2", NULL, "SYSCLK" }, { "Noise Generator", NULL, "NOISE" }, { "Tone Generator 1", NULL, "TONE" }, { "Tone Generator 2", NULL, "TONE" }, { "AIF1 Capture", NULL, "AIF1TX1" }, { "AIF1 Capture", NULL, "AIF1TX2" }, { "AIF1 Capture", NULL, "AIF1TX3" }, { "AIF1 Capture", NULL, "AIF1TX4" }, { "AIF1 Capture", NULL, "AIF1TX5" }, { "AIF1 Capture", NULL, "AIF1TX6" }, { "AIF1 Capture", NULL, "AIF1TX7" }, { "AIF1 Capture", NULL, "AIF1TX8" }, { "AIF1RX1", NULL, "AIF1 Playback" }, { "AIF1RX2", NULL, "AIF1 Playback" }, { "AIF1RX3", NULL, "AIF1 Playback" }, { "AIF1RX4", NULL, "AIF1 Playback" }, { "AIF1RX5", NULL, "AIF1 Playback" }, { "AIF1RX6", NULL, "AIF1 Playback" }, { "AIF1RX7", NULL, "AIF1 Playback" }, { "AIF1RX8", NULL, "AIF1 Playback" }, { "AIF2 Capture", NULL, "AIF2TX1" }, { "AIF2 Capture", NULL, "AIF2TX2" }, { "AIF2 Capture", NULL, "AIF2TX3" }, { "AIF2 Capture", NULL, "AIF2TX4" }, { "AIF2 Capture", NULL, "AIF2TX5" }, { "AIF2 Capture", NULL, "AIF2TX6" }, { "AIF2RX1", NULL, "AIF2 Playback" }, { "AIF2RX2", NULL, "AIF2 Playback" }, { "AIF2RX3", NULL, "AIF2 Playback" }, { "AIF2RX4", NULL, "AIF2 Playback" }, { "AIF2RX5", NULL, "AIF2 Playback" }, { "AIF2RX6", NULL, "AIF2 Playback" }, { "AIF3 Capture", NULL, "AIF3TX1" }, { "AIF3 Capture", NULL, "AIF3TX2" }, { "AIF3RX1", NULL, "AIF3 Playback" }, { "AIF3RX2", NULL, "AIF3 Playback" }, { "Slim1 Capture", NULL, "SLIMTX1" }, { "Slim1 Capture", NULL, "SLIMTX2" }, { "Slim1 Capture", NULL, "SLIMTX3" }, { "Slim1 Capture", NULL, "SLIMTX4" }, { "SLIMRX1", NULL, "Slim1 Playback" }, { "SLIMRX2", NULL, "Slim1 Playback" }, { 
"SLIMRX3", NULL, "Slim1 Playback" }, { "SLIMRX4", NULL, "Slim1 Playback" }, { "Slim2 Capture", NULL, "SLIMTX5" }, { "Slim2 Capture", NULL, "SLIMTX6" }, { "SLIMRX5", NULL, "Slim2 Playback" }, { "SLIMRX6", NULL, "Slim2 Playback" }, { "Slim3 Capture", NULL, "SLIMTX7" }, { "Slim3 Capture", NULL, "SLIMTX8" }, { "SLIMRX7", NULL, "Slim3 Playback" }, { "SLIMRX8", NULL, "Slim3 Playback" }, { "AIF1 Playback", NULL, "SYSCLK" }, { "AIF2 Playback", NULL, "SYSCLK" }, { "AIF3 Playback", NULL, "SYSCLK" }, { "Slim1 Playback", NULL, "SYSCLK" }, { "Slim2 Playback", NULL, "SYSCLK" }, { "Slim3 Playback", NULL, "SYSCLK" }, { "AIF1 Capture", NULL, "SYSCLK" }, { "AIF2 Capture", NULL, "SYSCLK" }, { "AIF3 Capture", NULL, "SYSCLK" }, { "Slim1 Capture", NULL, "SYSCLK" }, { "Slim2 Capture", NULL, "SYSCLK" }, { "Slim3 Capture", NULL, "SYSCLK" }, { "IN1L PGA", NULL, "IN1L" }, { "IN1R PGA", NULL, "IN1R" }, { "IN2L PGA", NULL, "IN2L" }, { "IN2R PGA", NULL, "IN2R" }, { "IN3L PGA", NULL, "IN3L" }, { "IN3R PGA", NULL, "IN3R" }, { "IN4L PGA", NULL, "IN4L" }, { "IN4R PGA", NULL, "IN4R" }, ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"), ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"), ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), ARIZONA_MIXER_ROUTES("OUT2R", "HPOUT2R"), ARIZONA_MIXER_ROUTES("OUT3L", "HPOUT3L"), ARIZONA_MIXER_ROUTES("OUT3R", "HPOUT3R"), ARIZONA_MIXER_ROUTES("OUT4L", "SPKOUTL"), ARIZONA_MIXER_ROUTES("OUT4R", "SPKOUTR"), ARIZONA_MIXER_ROUTES("OUT5L", "SPKDAT1L"), ARIZONA_MIXER_ROUTES("OUT5R", "SPKDAT1R"), ARIZONA_MIXER_ROUTES("OUT6L", "SPKDAT2L"), ARIZONA_MIXER_ROUTES("OUT6R", "SPKDAT2R"), ARIZONA_MIXER_ROUTES("PWM1 Driver", "PWM1"), ARIZONA_MIXER_ROUTES("PWM2 Driver", "PWM2"), ARIZONA_MIXER_ROUTES("AIF1TX1", "AIF1TX1"), ARIZONA_MIXER_ROUTES("AIF1TX2", "AIF1TX2"), ARIZONA_MIXER_ROUTES("AIF1TX3", "AIF1TX3"), ARIZONA_MIXER_ROUTES("AIF1TX4", "AIF1TX4"), ARIZONA_MIXER_ROUTES("AIF1TX5", "AIF1TX5"), ARIZONA_MIXER_ROUTES("AIF1TX6", "AIF1TX6"), ARIZONA_MIXER_ROUTES("AIF1TX7", "AIF1TX7"), 
ARIZONA_MIXER_ROUTES("AIF1TX8", "AIF1TX8"), ARIZONA_MIXER_ROUTES("AIF2TX1", "AIF2TX1"), ARIZONA_MIXER_ROUTES("AIF2TX2", "AIF2TX2"), ARIZONA_MIXER_ROUTES("AIF2TX3", "AIF2TX3"), ARIZONA_MIXER_ROUTES("AIF2TX4", "AIF2TX4"), ARIZONA_MIXER_ROUTES("AIF2TX5", "AIF2TX5"), ARIZONA_MIXER_ROUTES("AIF2TX6", "AIF2TX6"), ARIZONA_MIXER_ROUTES("AIF3TX1", "AIF3TX1"), ARIZONA_MIXER_ROUTES("AIF3TX2", "AIF3TX2"), ARIZONA_MIXER_ROUTES("SLIMTX1", "SLIMTX1"), ARIZONA_MIXER_ROUTES("SLIMTX2", "SLIMTX2"), ARIZONA_MIXER_ROUTES("SLIMTX3", "SLIMTX3"), ARIZONA_MIXER_ROUTES("SLIMTX4", "SLIMTX4"), ARIZONA_MIXER_ROUTES("SLIMTX5", "SLIMTX5"), ARIZONA_MIXER_ROUTES("SLIMTX6", "SLIMTX6"), ARIZONA_MIXER_ROUTES("SLIMTX7", "SLIMTX7"), ARIZONA_MIXER_ROUTES("SLIMTX8", "SLIMTX8"), ARIZONA_MIXER_ROUTES("EQ1", "EQ1"), ARIZONA_MIXER_ROUTES("EQ2", "EQ2"), ARIZONA_MIXER_ROUTES("EQ3", "EQ3"), ARIZONA_MIXER_ROUTES("EQ4", "EQ4"), ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"), ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"), ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"), ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"), ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"), ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"), ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"), ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"), ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"), ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"), ARIZONA_MUX_ROUTES("ASRC1L", "ASRC1L"), ARIZONA_MUX_ROUTES("ASRC1R", "ASRC1R"), ARIZONA_MUX_ROUTES("ASRC2L", "ASRC2L"), ARIZONA_MUX_ROUTES("ASRC2R", "ASRC2R"), ARIZONA_DSP_ROUTES("DSP1"), ARIZONA_DSP_ROUTES("DSP2"), ARIZONA_DSP_ROUTES("DSP3"), ARIZONA_DSP_ROUTES("DSP4"), ARIZONA_MUX_ROUTES("ISRC1INT1", "ISRC1INT1"), ARIZONA_MUX_ROUTES("ISRC1INT2", "ISRC1INT2"), ARIZONA_MUX_ROUTES("ISRC1INT3", "ISRC1INT3"), ARIZONA_MUX_ROUTES("ISRC1INT4", "ISRC1INT4"), ARIZONA_MUX_ROUTES("ISRC1DEC1", "ISRC1DEC1"), ARIZONA_MUX_ROUTES("ISRC1DEC2", "ISRC1DEC2"), ARIZONA_MUX_ROUTES("ISRC1DEC3", "ISRC1DEC3"), ARIZONA_MUX_ROUTES("ISRC1DEC4", "ISRC1DEC4"), ARIZONA_MUX_ROUTES("ISRC2INT1", "ISRC2INT1"), 
ARIZONA_MUX_ROUTES("ISRC2INT2", "ISRC2INT2"), ARIZONA_MUX_ROUTES("ISRC2INT3", "ISRC2INT3"), ARIZONA_MUX_ROUTES("ISRC2INT4", "ISRC2INT4"), ARIZONA_MUX_ROUTES("ISRC2DEC1", "ISRC2DEC1"), ARIZONA_MUX_ROUTES("ISRC2DEC2", "ISRC2DEC2"), ARIZONA_MUX_ROUTES("ISRC2DEC3", "ISRC2DEC3"), ARIZONA_MUX_ROUTES("ISRC2DEC4", "ISRC2DEC4"), ARIZONA_MUX_ROUTES("ISRC3INT1", "ISRC3INT1"), ARIZONA_MUX_ROUTES("ISRC3INT2", "ISRC3INT2"), ARIZONA_MUX_ROUTES("ISRC3INT3", "ISRC3INT3"), ARIZONA_MUX_ROUTES("ISRC3INT4", "ISRC3INT4"), ARIZONA_MUX_ROUTES("ISRC3DEC1", "ISRC3DEC1"), ARIZONA_MUX_ROUTES("ISRC3DEC2", "ISRC3DEC2"), ARIZONA_MUX_ROUTES("ISRC3DEC3", "ISRC3DEC3"), ARIZONA_MUX_ROUTES("ISRC3DEC4", "ISRC3DEC4"), { "AEC Loopback", "HPOUT1L", "OUT1L" }, { "AEC Loopback", "HPOUT1R", "OUT1R" }, { "HPOUT1L", NULL, "OUT1L" }, { "HPOUT1R", NULL, "OUT1R" }, { "AEC Loopback", "HPOUT2L", "OUT2L" }, { "AEC Loopback", "HPOUT2R", "OUT2R" }, { "HPOUT2L", NULL, "OUT2L" }, { "HPOUT2R", NULL, "OUT2R" }, { "AEC Loopback", "HPOUT3L", "OUT3L" }, { "AEC Loopback", "HPOUT3R", "OUT3R" }, { "HPOUT3L", NULL, "OUT3L" }, { "HPOUT3R", NULL, "OUT3R" }, { "AEC Loopback", "SPKOUTL", "OUT4L" }, { "SPKOUTLN", NULL, "OUT4L" }, { "SPKOUTLP", NULL, "OUT4L" }, { "AEC Loopback", "SPKOUTR", "OUT4R" }, { "SPKOUTRN", NULL, "OUT4R" }, { "SPKOUTRP", NULL, "OUT4R" }, { "AEC Loopback", "SPKDAT1L", "OUT5L" }, { "AEC Loopback", "SPKDAT1R", "OUT5R" }, { "SPKDAT1L", NULL, "OUT5L" }, { "SPKDAT1R", NULL, "OUT5R" }, { "AEC Loopback", "SPKDAT2L", "OUT6L" }, { "AEC Loopback", "SPKDAT2R", "OUT6R" }, { "SPKDAT2L", NULL, "OUT6L" }, { "SPKDAT2R", NULL, "OUT6R" }, { "MICSUPP", NULL, "SYSCLK" }, { "DRC1 Signal Activity", NULL, "DRC1L" }, { "DRC1 Signal Activity", NULL, "DRC1R" }, { "DRC2 Signal Activity", NULL, "DRC2L" }, { "DRC2 Signal Activity", NULL, "DRC2R" }, }; static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source, unsigned int Fref, unsigned int Fout) { struct wm5110_priv *wm5110 = snd_soc_codec_get_drvdata(codec); switch 
(fll_id) { case WM5110_FLL1: return arizona_set_fll(&wm5110->fll[0], source, Fref, Fout); case WM5110_FLL2: return arizona_set_fll(&wm5110->fll[1], source, Fref, Fout); case WM5110_FLL1_REFCLK: return arizona_set_fll_refclk(&wm5110->fll[0], source, Fref, Fout); case WM5110_FLL2_REFCLK: return arizona_set_fll_refclk(&wm5110->fll[1], source, Fref, Fout); default: return -EINVAL; } } #define WM5110_RATES SNDRV_PCM_RATE_8000_192000 #define WM5110_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_driver wm5110_dai[] = { { .name = "wm5110-aif1", .id = 1, .base = ARIZONA_AIF1_BCLK_CTRL, .playback = { .stream_name = "AIF1 Playback", .channels_min = 1, .channels_max = 8, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .capture = { .stream_name = "AIF1 Capture", .channels_min = 1, .channels_max = 8, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .ops = &arizona_dai_ops, .symmetric_rates = 1, .symmetric_samplebits = 1, }, { .name = "wm5110-aif2", .id = 2, .base = ARIZONA_AIF2_BCLK_CTRL, .playback = { .stream_name = "AIF2 Playback", .channels_min = 1, .channels_max = 6, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .capture = { .stream_name = "AIF2 Capture", .channels_min = 1, .channels_max = 6, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .ops = &arizona_dai_ops, .symmetric_rates = 1, .symmetric_samplebits = 1, }, { .name = "wm5110-aif3", .id = 3, .base = ARIZONA_AIF3_BCLK_CTRL, .playback = { .stream_name = "AIF3 Playback", .channels_min = 1, .channels_max = 2, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .capture = { .stream_name = "AIF3 Capture", .channels_min = 1, .channels_max = 2, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .ops = &arizona_dai_ops, .symmetric_rates = 1, .symmetric_samplebits = 1, }, { .name = "wm5110-slim1", .id = 4, .playback = { .stream_name = "Slim1 Playback", .channels_min = 1, .channels_max = 4, .rates = WM5110_RATES, 
.formats = WM5110_FORMATS, }, .capture = { .stream_name = "Slim1 Capture", .channels_min = 1, .channels_max = 4, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .ops = &arizona_simple_dai_ops, }, { .name = "wm5110-slim2", .id = 5, .playback = { .stream_name = "Slim2 Playback", .channels_min = 1, .channels_max = 2, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .capture = { .stream_name = "Slim2 Capture", .channels_min = 1, .channels_max = 2, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .ops = &arizona_simple_dai_ops, }, { .name = "wm5110-slim3", .id = 6, .playback = { .stream_name = "Slim3 Playback", .channels_min = 1, .channels_max = 2, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .capture = { .stream_name = "Slim3 Capture", .channels_min = 1, .channels_max = 2, .rates = WM5110_RATES, .formats = WM5110_FORMATS, }, .ops = &arizona_simple_dai_ops, }, }; static int wm5110_codec_probe(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec); int i, ret; priv->core.arizona->dapm = dapm; arizona_init_spk(codec); arizona_init_gpio(codec); arizona_init_mono(codec); for (i = 0; i < WM5110_NUM_ADSP; ++i) { ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec); if (ret) goto err_adsp2_codec_probe; } ret = snd_soc_add_codec_controls(codec, arizona_adsp2_rate_controls, WM5110_NUM_ADSP); if (ret) goto err_adsp2_codec_probe; snd_soc_dapm_disable_pin(dapm, "HAPTICS"); return 0; err_adsp2_codec_probe: for (--i; i >= 0; --i) wm_adsp2_codec_remove(&priv->core.adsp[i], codec); return ret; } static int wm5110_codec_remove(struct snd_soc_codec *codec) { struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec); int i; for (i = 0; i < WM5110_NUM_ADSP; ++i) wm_adsp2_codec_remove(&priv->core.adsp[i], codec); priv->core.arizona->dapm = NULL; return 0; } #define WM5110_DIG_VU 0x0200 static unsigned int wm5110_digital_vu[] = { ARIZONA_DAC_DIGITAL_VOLUME_1L, 
ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_DAC_DIGITAL_VOLUME_2L, ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_DAC_DIGITAL_VOLUME_3L, ARIZONA_DAC_DIGITAL_VOLUME_3R, ARIZONA_DAC_DIGITAL_VOLUME_4L, ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_DAC_DIGITAL_VOLUME_5L, ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_DAC_DIGITAL_VOLUME_6L, ARIZONA_DAC_DIGITAL_VOLUME_6R, }; static struct regmap *wm5110_get_regmap(struct device *dev) { struct wm5110_priv *priv = dev_get_drvdata(dev); return priv->core.arizona->regmap; } static struct snd_soc_codec_driver soc_codec_dev_wm5110 = { .probe = wm5110_codec_probe, .remove = wm5110_codec_remove, .get_regmap = wm5110_get_regmap, .idle_bias_off = true, .set_sysclk = arizona_set_sysclk, .set_pll = wm5110_set_fll, .controls = wm5110_snd_controls, .num_controls = ARRAY_SIZE(wm5110_snd_controls), .dapm_widgets = wm5110_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm5110_dapm_widgets), .dapm_routes = wm5110_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm5110_dapm_routes), }; static int wm5110_probe(struct platform_device *pdev) { struct arizona *arizona = dev_get_drvdata(pdev->dev.parent); struct wm5110_priv *wm5110; int i, ret; wm5110 = devm_kzalloc(&pdev->dev, sizeof(struct wm5110_priv), GFP_KERNEL); if (wm5110 == NULL) return -ENOMEM; platform_set_drvdata(pdev, wm5110); wm5110->core.arizona = arizona; wm5110->core.num_inputs = 8; for (i = 0; i < WM5110_NUM_ADSP; i++) { wm5110->core.adsp[i].part = "wm5110"; wm5110->core.adsp[i].num = i + 1; wm5110->core.adsp[i].type = WMFW_ADSP2; wm5110->core.adsp[i].dev = arizona->dev; wm5110->core.adsp[i].regmap = arizona->regmap; wm5110->core.adsp[i].base = ARIZONA_DSP1_CONTROL_1 + (0x100 * i); wm5110->core.adsp[i].mem = wm5110_dsp_regions[i]; wm5110->core.adsp[i].num_mems = ARRAY_SIZE(wm5110_dsp1_regions); ret = wm_adsp2_init(&wm5110->core.adsp[i]); if (ret != 0) return ret; } for (i = 0; i < ARRAY_SIZE(wm5110->fll); i++) wm5110->fll[i].vco_mult = 3; arizona_init_fll(arizona, 1, ARIZONA_FLL1_CONTROL_1 - 1, 
ARIZONA_IRQ_FLL1_LOCK, ARIZONA_IRQ_FLL1_CLOCK_OK, &wm5110->fll[0]); arizona_init_fll(arizona, 2, ARIZONA_FLL2_CONTROL_1 - 1, ARIZONA_IRQ_FLL2_LOCK, ARIZONA_IRQ_FLL2_CLOCK_OK, &wm5110->fll[1]); /* SR2 fixed at 8kHz, SR3 fixed at 16kHz */ regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_2, ARIZONA_SAMPLE_RATE_2_MASK, 0x11); regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_3, ARIZONA_SAMPLE_RATE_3_MASK, 0x12); for (i = 0; i < ARRAY_SIZE(wm5110_dai); i++) arizona_init_dai(&wm5110->core, i); /* Latch volume update bits */ for (i = 0; i < ARRAY_SIZE(wm5110_digital_vu); i++) regmap_update_bits(arizona->regmap, wm5110_digital_vu[i], WM5110_DIG_VU, WM5110_DIG_VU); pm_runtime_enable(&pdev->dev); pm_runtime_idle(&pdev->dev); return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5110, wm5110_dai, ARRAY_SIZE(wm5110_dai)); } static int wm5110_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } static struct platform_driver wm5110_codec_driver = { .driver = { .name = "wm5110-codec", }, .probe = wm5110_probe, .remove = wm5110_remove, }; module_platform_driver(wm5110_codec_driver); MODULE_DESCRIPTION("ASoC WM5110 driver"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm5110-codec");
gpl-2.0
SOKP/kernel_htc_msm8974
drivers/media/platform/msm/vidc/q6_hfi.c
197
35471
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/slab.h> #include <linux/iommu.h> #include <mach/iommu_domains.h> #include <mach/qdsp6v2/apr.h> #include <mach/subsystem_restart.h> #include "hfi_packetization.h" #include "msm_vidc_debug.h" #include "q6_hfi.h" #include "vidc_hfi_api.h" static struct hal_device_data hal_ctxt; static int write_queue(void *info, u8 *packet) { u32 packet_size_in_words, new_write_idx; struct q6_iface_q_info *qinfo; u32 empty_space, read_idx; u32 *write_ptr; if (!info || !packet) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } qinfo = (struct q6_iface_q_info *) info; packet_size_in_words = (*(u32 *)packet) >> 2; if (packet_size_in_words == 0) { dprintk(VIDC_ERR, "Zero packet size"); return -ENODATA; } read_idx = qinfo->read_idx; empty_space = (qinfo->write_idx >= read_idx) ? 
(qinfo->q_size - (qinfo->write_idx - read_idx)) : (read_idx - qinfo->write_idx); if (empty_space <= packet_size_in_words) { dprintk(VIDC_ERR, "Insufficient size (%d) to write (%d)", empty_space, packet_size_in_words); return -ENOTEMPTY; } new_write_idx = (qinfo->write_idx + packet_size_in_words); write_ptr = (u32 *)(qinfo->buffer + (qinfo->write_idx << 2)); if (new_write_idx < qinfo->q_size) { memcpy(write_ptr, packet, packet_size_in_words << 2); } else { new_write_idx -= qinfo->q_size; memcpy(write_ptr, packet, (packet_size_in_words - new_write_idx) << 2); memcpy((void *)qinfo->buffer, packet + ((packet_size_in_words - new_write_idx) << 2), new_write_idx << 2); } qinfo->write_idx = new_write_idx; return 0; } static int read_queue(void *info, u8 *packet) { u32 packet_size_in_words, new_read_idx; u32 *read_ptr; struct q6_iface_q_info *qinfo; if (!info || !packet) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } qinfo = (struct q6_iface_q_info *) info; if (qinfo->read_idx == qinfo->write_idx) return -EPERM; read_ptr = (u32 *)(qinfo->buffer + (qinfo->read_idx << 2)); packet_size_in_words = (*read_ptr) >> 2; if (packet_size_in_words == 0) { dprintk(VIDC_ERR, "Zero packet size"); return -ENODATA; } new_read_idx = qinfo->read_idx + packet_size_in_words; if (new_read_idx < qinfo->q_size) { memcpy(packet, read_ptr, packet_size_in_words << 2); } else { new_read_idx -= qinfo->q_size; memcpy(packet, read_ptr, (packet_size_in_words - new_read_idx) << 2); memcpy(packet + ((packet_size_in_words - new_read_idx) << 2), (u8 *)qinfo->buffer, new_read_idx << 2); } qinfo->read_idx = new_read_idx; return 0; } static int q6_hfi_iface_eventq_write(struct q6_hfi_device *device, void *pkt) { struct q6_iface_q_info *q_info; int rc = 0; unsigned long flags = 0; if (!device || !pkt) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } q_info = &device->event_queue; if (!q_info->buffer) { dprintk(VIDC_ERR, "cannot write to shared Q"); rc = -ENODATA; goto err_q_write; } 
spin_lock_irqsave(&q_info->lock, flags); rc = write_queue(q_info, (u8 *)pkt); if (rc) dprintk(VIDC_ERR, "q6_hfi_iface_eventq_write: queue_full"); spin_unlock_irqrestore(&q_info->lock, flags); err_q_write: return rc; } static int q6_hfi_iface_eventq_read(struct q6_hfi_device *device, void *pkt) { int rc = 0; struct q6_iface_q_info *q_info; unsigned long flags = 0; if (!device || !pkt) { dprintk(VIDC_ERR, "Invalid Params\n"); return -EINVAL; } q_info = &device->event_queue; if (!q_info->buffer) { dprintk(VIDC_ERR, "cannot read from shared Q"); rc = -ENODATA; goto read_error; } spin_lock_irqsave(&q_info->lock, flags); rc = read_queue(q_info, (u8 *)pkt); if (rc) { dprintk(VIDC_INFO, "q6_hfi_iface_eventq_read:queue_empty"); rc = -ENODATA; } spin_unlock_irqrestore(&q_info->lock, flags); read_error: return rc; } static void q6_hfi_core_work_handler(struct work_struct *work) { int rc = 0; struct q6_hfi_device *device = container_of( work, struct q6_hfi_device, vidc_worker); u8 packet[VIDC_IFACEQ_MED_PKT_SIZE]; /* need to consume all the messages from the firmware */ do { rc = q6_hfi_iface_eventq_read(device, packet); if (!rc) hfi_process_msg_packet(device->callback, device->device_id, (struct vidc_hal_msg_pkt_hdr *) packet, &device->sess_head, &device->session_lock); } while (!rc); if (rc != -ENODATA) dprintk(VIDC_ERR, "Failed to read from event queue"); } static int q6_hfi_register_iommu_domains(struct q6_hfi_device *device) { struct iommu_domain *domain; int rc = 0, i = 0; struct iommu_set *iommu_group_set; struct iommu_info *iommu_map; if (!device || !device->res) { dprintk(VIDC_ERR, "Invalid parameter: %p", device); return -EINVAL; } iommu_group_set = &device->res->iommu_group_set; for (i = 0; i < iommu_group_set->count; i++) { iommu_map = &iommu_group_set->iommu_maps[i]; iommu_map->group = iommu_group_find(iommu_map->name); if (!iommu_map->group) { dprintk(VIDC_DBG, "Failed to find group :%s\n", iommu_map->name); rc = -EPROBE_DEFER; goto fail_group; } domain = 
iommu_group_get_iommudata(iommu_map->group); if (IS_ERR_OR_NULL(domain)) { dprintk(VIDC_ERR, "Failed to get domain data for group %p", iommu_map->group); rc = -EINVAL; goto fail_group; } iommu_map->domain = msm_find_domain_no(domain); if (iommu_map->domain < 0) { dprintk(VIDC_ERR, "Failed to get domain index for domain %p", domain); rc = -EINVAL; goto fail_group; } } return rc; fail_group: for (--i; i >= 0; i--) { iommu_map = &iommu_group_set->iommu_maps[i]; if (iommu_map->group) iommu_group_put(iommu_map->group); iommu_map->group = NULL; iommu_map->domain = -1; } return rc; } static void q6_hfi_deregister_iommu_domains(struct q6_hfi_device *device) { struct iommu_set *iommu_group_set; struct iommu_info *iommu_map; int i = 0; if (!device || !device->res) { dprintk(VIDC_ERR, "Invalid parameter: %p", device); return; } iommu_group_set = &device->res->iommu_group_set; for (i = 0; i < iommu_group_set->count; i++) { iommu_map = &iommu_group_set->iommu_maps[i]; if (iommu_map->group) iommu_group_put(iommu_map->group); iommu_map->group = NULL; iommu_map->domain = -1; } } static int q6_hfi_init_resources(struct q6_hfi_device *device, struct msm_vidc_platform_resources *res) { int rc = 0; if (!device || !res) { dprintk(VIDC_ERR, "Invalid device or resources"); return -EINVAL; } device->res = res; rc = q6_hfi_register_iommu_domains(device); if (rc) { if (rc != -EPROBE_DEFER) { dprintk(VIDC_ERR, "Failed to register iommu domains: %d\n", rc); } } return rc; } static void q6_hfi_deinit_resources(struct q6_hfi_device *device) { q6_hfi_deregister_iommu_domains(device); } static void *q6_hfi_add_device(u32 device_id, hfi_cmd_response_callback callback) { struct q6_hfi_device *hdevice = NULL; if (!callback) { dprintk(VIDC_ERR, "Invalid Paramters"); return NULL; } hdevice = (struct q6_hfi_device *) kzalloc(sizeof(struct q6_hfi_device), GFP_KERNEL); if (!hdevice) { dprintk(VIDC_ERR, "failed to allocate new device"); goto err_alloc; } hdevice->device_id = device_id; hdevice->callback = 
callback; dprintk(VIDC_DBG, "q6_hfi_add_device device_id %d\n", device_id); INIT_WORK(&hdevice->vidc_worker, q6_hfi_core_work_handler); hdevice->vidc_workq = create_singlethread_workqueue( "msm_vidc_workerq_q6"); if (!hdevice->vidc_workq) { dprintk(VIDC_ERR, ": create workq failed\n"); goto error_createq; } if (hal_ctxt.dev_count == 0) INIT_LIST_HEAD(&hal_ctxt.dev_head); INIT_LIST_HEAD(&hdevice->list); list_add_tail(&hdevice->list, &hal_ctxt.dev_head); hal_ctxt.dev_count++; return (void *) hdevice; error_createq: kfree(hdevice); err_alloc: return NULL; } static void *q6_hfi_get_device(u32 device_id, struct msm_vidc_platform_resources *res, hfi_cmd_response_callback callback) { struct q6_hfi_device *device; int rc = 0; if (!callback) { dprintk(VIDC_ERR, "%s Invalid params: %p\n", __func__, callback); return NULL; } device = q6_hfi_add_device(device_id, &handle_cmd_response); if (!device) { dprintk(VIDC_ERR, "Failed to create HFI device\n"); return NULL; } rc = q6_hfi_init_resources(device, res); if (rc) { if (rc != -EPROBE_DEFER) dprintk(VIDC_ERR, "Failed to init resources: %d\n", rc); goto err_fail_init_res; } return device; err_fail_init_res: q6_hfi_delete_device(device); return ERR_PTR(rc); } void q6_hfi_delete_device(void *device) { struct q6_hfi_device *close, *tmp, *dev; if (device) { q6_hfi_deinit_resources(device); dev = (struct q6_hfi_device *) device; list_for_each_entry_safe(close, tmp, &hal_ctxt.dev_head, list) { if (close->device_id == dev->device_id) { hal_ctxt.dev_count--; list_del(&close->list); destroy_workqueue(close->vidc_workq); kfree(close); break; } } } } static inline void q6_hfi_add_apr_hdr(struct q6_hfi_device *dev, struct apr_hdr *hdr, u32 pkt_size) { hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(sizeof(struct apr_hdr)), APR_PKT_VER); hdr->src_svc = ((struct apr_svc *)dev->apr)->id; hdr->src_domain = APR_DOMAIN_APPS; hdr->dest_svc = APR_SVC_VIDC; hdr->src_port = 0; hdr->dest_port = 0; hdr->pkt_size = pkt_size; hdr->token 
= 0; hdr->opcode = VIDEO_HFI_CMD_ID; } static int q6_hfi_apr_callback(struct apr_client_data *data, void *priv) { struct q6_hfi_device *device = priv; struct hfi_msg_event_notify_packet pkt = {0}; void *payload = NULL; int rc = 0; if (!data || !device) { dprintk(VIDC_ERR, "%s - Invalid arguments", __func__); return -EINVAL; } dprintk(VIDC_DBG, "%s opcode = %u payload size = %u", __func__, data->opcode, data->payload_size); if (data->opcode == RESET_EVENTS) { dprintk(VIDC_ERR, "%s Received subsystem reset event: %d", __func__, data->reset_event); pkt.packet_type = HFI_MSG_EVENT_NOTIFY; pkt.size = sizeof(pkt); pkt.event_id = HFI_EVENT_SYS_ERROR; pkt.event_data1 = data->opcode; pkt.event_data2 = data->reset_event; payload = &pkt; } else if (data->payload_size > 0) { payload = data->payload; } else { dprintk(VIDC_ERR, "%s - Invalid payload size", __func__); return -EINVAL; } rc = q6_hfi_iface_eventq_write(device, payload); if (rc) { dprintk(VIDC_ERR, "%s failed to write to event queue", __func__); return rc; } queue_work(device->vidc_workq, &device->vidc_worker); return 0; } static void q6_release_event_queue(struct q6_hfi_device *device) { kfree(device->event_queue.buffer); device->event_queue.buffer = NULL; device->event_queue.q_size = 0; device->event_queue.read_idx = 0; device->event_queue.write_idx = 0; } static int q6_init_event_queue(struct q6_hfi_device *dev) { struct q6_iface_q_info *iface_q; if (!dev) { dprintk(VIDC_ERR, "Invalid device"); return -EINVAL; } iface_q = &dev->event_queue; iface_q->buffer = kzalloc(Q6_IFACEQ_QUEUE_SIZE, GFP_KERNEL); if (!iface_q->buffer) { dprintk(VIDC_ERR, "iface_q alloc failed"); q6_release_event_queue(dev); return -ENOMEM; } else { iface_q->q_size = Q6_IFACEQ_QUEUE_SIZE / 4; iface_q->read_idx = 0; iface_q->write_idx = 0; spin_lock_init(&iface_q->lock); } return 0; } static int q6_hfi_core_init(void *device) { struct q6_apr_cmd_sys_init_packet apr; int rc = 0; struct q6_hfi_device *dev = device; if (!dev) { dprintk(VIDC_ERR, 
"%s: invalid argument\n", __func__); return -ENODEV; } INIT_LIST_HEAD(&dev->sess_head); mutex_init(&dev->session_lock); if (!dev->event_queue.buffer) { rc = q6_init_event_queue(dev); if (rc) { dprintk(VIDC_ERR, "q6_init_event_queue failed"); goto err_core_init; } } else { dprintk(VIDC_ERR, "queue buffer exists"); rc = -EEXIST; goto err_core_init; } q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); rc = create_pkt_cmd_sys_init(&apr.pkt, HFI_VIDEO_ARCH_OX); if (rc) { dprintk(VIDC_ERR, "Failed to create sys init pkt"); goto err_core_init; } rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_core_init: return rc; } static int q6_hfi_core_release(void *device) { struct q6_hfi_device *dev = device; if (!dev) { dprintk(VIDC_ERR, "%s: invalid argument\n", __func__); return -ENODEV; } q6_release_event_queue(dev); dprintk(VIDC_DBG, "HAL exited\n"); return 0; } static void *q6_hfi_session_init(void *device, u32 session_id, enum hal_domain session_type, enum hal_video_codec codec_type) { struct q6_apr_cmd_sys_session_init_packet apr; struct hal_session *new_session; struct q6_hfi_device *dev = device; int rc = 0; if (!dev) { dprintk(VIDC_ERR, "%s: invalid argument\n", __func__); return NULL; } new_session = (struct hal_session *) kzalloc(sizeof(struct hal_session), GFP_KERNEL); if (!new_session) { dprintk(VIDC_ERR, "new session fail: Out of memory\n"); return NULL; } new_session->session_id = (u32) session_id; if (session_type == 1) new_session->is_decoder = 0; else if (session_type == 2) new_session->is_decoder = 1; new_session->device = dev; q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); if (create_pkt_cmd_sys_session_init(&apr.pkt, (u32)new_session, session_type, codec_type)) { dprintk(VIDC_ERR, "session_init: failed to create packet"); goto err_session_init; } /* * Add session id to the list entry and then send the apr pkt. 
* This will avoid scenarios where apr_send_pkt is taking more * time and Q6 is returning an ack even before the session id * gets added to the session list. */ mutex_lock(&dev->session_lock); list_add_tail(&new_session->list, &dev->sess_head); mutex_unlock(&dev->session_lock); rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); /* Delete the session id as the send pkt is not successful */ mutex_lock(&dev->session_lock); list_del(&new_session->list); mutex_unlock(&dev->session_lock); rc = -EBADE; goto err_session_init; } return new_session; err_session_init: kfree(new_session); return NULL; } static int q6_hal_send_session_cmd(void *sess, int pkt_type) { struct q6_apr_session_cmd_pkt apr; int rc = 0; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !session->device) { dprintk(VIDC_ERR, "%s: invalid arguments\n", __func__); return -EINVAL; } dev = session->device; q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); rc = create_pkt_cmd_session_cmd(&apr.pkt, pkt_type, (u32)session); if (rc) { dprintk(VIDC_ERR, "send session cmd: create pkt failed"); goto err_create_pkt; } rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_end(void *session) { return q6_hal_send_session_cmd(session, HFI_CMD_SYS_SESSION_END); } static int q6_hfi_session_abort(void *session) { return q6_hal_send_session_cmd(session, HFI_CMD_SYS_SESSION_ABORT); } static int q6_hfi_session_clean(void *session) { struct hal_session *sess_close; if (!session) { dprintk(VIDC_ERR, "Invalid Params %s", __func__); return -EINVAL; } sess_close = session; dprintk(VIDC_DBG, "deleted the session: 0x%x", sess_close->session_id); mutex_lock(&((struct q6_hfi_device *) sess_close->device)->session_lock); list_del(&sess_close->list); 
mutex_unlock(&((struct q6_hfi_device *) sess_close->device)->session_lock); kfree(sess_close); return 0; } static int q6_hfi_session_set_buffers(void *sess, struct vidc_buffer_addr_info *buffer_info) { struct q6_apr_cmd_session_set_buffers_packet *apr; u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE]; int rc = 0; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !buffer_info || !session->device) { dprintk(VIDC_ERR, "%s: invalid arguments\n", __func__); return -EINVAL; } dev = session->device; if (buffer_info->buffer_type == HAL_BUFFER_INPUT) return 0; apr = (struct q6_apr_cmd_session_set_buffers_packet *)packet; q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE); rc = create_pkt_cmd_session_set_buffers(&apr->pkt, (u32)session, buffer_info); if (rc) { dprintk(VIDC_ERR, "set buffers: failed to create packet"); goto err_create_pkt; } dprintk(VIDC_INFO, "set buffers: 0x%x", buffer_info->buffer_type); rc = apr_send_pkt(dev->apr, (uint32_t *)apr); if (rc != apr->hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_release_buffers(void *sess, struct vidc_buffer_addr_info *buffer_info) { struct q6_apr_cmd_session_release_buffer_packet *apr; u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE]; int rc = 0; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !buffer_info || !session->device) { dprintk(VIDC_ERR, "%s: invalid arguments\n", __func__); return -EINVAL; } dev = session->device; if (buffer_info->buffer_type == HAL_BUFFER_INPUT) return 0; apr = (struct q6_apr_cmd_session_release_buffer_packet *) packet; q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE); rc = create_pkt_cmd_session_release_buffers(&apr->pkt, (u32)session, buffer_info); if (rc) { dprintk(VIDC_ERR, "release buffers: failed to create packet"); goto err_create_pkt; } dprintk(VIDC_INFO, "Release buffers: 0x%x", 
buffer_info->buffer_type); rc = apr_send_pkt(dev->apr, (uint32_t *)apr); if (rc != apr->hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_load_res(void *sess) { return q6_hal_send_session_cmd(sess, HFI_CMD_SESSION_LOAD_RESOURCES); } static int q6_hfi_session_release_res(void *sess) { return q6_hal_send_session_cmd(sess, HFI_CMD_SESSION_RELEASE_RESOURCES); } static int q6_hfi_session_start(void *sess) { return q6_hal_send_session_cmd(sess, HFI_CMD_SESSION_START); } static int q6_hfi_session_stop(void *sess) { return q6_hal_send_session_cmd(sess, HFI_CMD_SESSION_STOP); } static int q6_hfi_session_suspend(void *sess) { return q6_hal_send_session_cmd(sess, HFI_CMD_SESSION_SUSPEND); } static int q6_hfi_session_resume(void *sess) { return q6_hal_send_session_cmd(sess, HFI_CMD_SESSION_RESUME); } static int q6_hfi_session_etb(void *sess, struct vidc_frame_data *input_frame) { int rc = 0; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !input_frame || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } dev = session->device; if (session->is_decoder) { struct q6_apr_cmd_session_empty_buffer_compressed_packet apr; q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); rc = create_pkt_cmd_session_etb_decoder(&apr.pkt, (u32)session, input_frame); if (rc) { dprintk(VIDC_ERR, "Session etb decoder: failed to create pkt"); goto err_create_pkt; } dprintk(VIDC_DBG, "Q DECODER INPUT BUFFER"); dprintk(VIDC_DBG, "addr = 0x%x ts = %lld", input_frame->device_addr, input_frame->timestamp); rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; } else { struct q6_apr_cmd_session_empty_buffer_uncompressed_plane0_packet apr; q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); rc = 
create_pkt_cmd_session_etb_encoder(&apr.pkt, (u32)session, input_frame); if (rc) { dprintk(VIDC_ERR, "Session etb encoder: failed to create pkt"); goto err_create_pkt; } dprintk(VIDC_DBG, "Q ENCODER INPUT BUFFER"); rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; } err_create_pkt: return rc; } static int q6_hfi_session_ftb(void *sess, struct vidc_frame_data *output_frame) { struct q6_apr_cmd_session_fill_buffer_packet apr; int rc = 0; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !output_frame || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } dev = session->device; q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); rc = create_pkt_cmd_session_ftb(&apr.pkt, (u32)session, output_frame); if (rc) { dprintk(VIDC_ERR, "Session ftb: failed to create pkt"); goto err_create_pkt; } dprintk(VIDC_INFO, "Q OUTPUT BUFFER"); rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_parse_seq_hdr(void *sess, struct vidc_seq_hdr *seq_hdr) { struct q6_apr_cmd_session_parse_sequence_header_packet *apr; int rc = 0; u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !seq_hdr || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } dev = session->device; apr = (struct q6_apr_cmd_session_parse_sequence_header_packet *) packet; q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_SMALL_PKT_SIZE); rc = create_pkt_cmd_session_parse_seq_header(&apr->pkt, (u32)session, seq_hdr); if (rc) { dprintk(VIDC_ERR, "Session parse seq hdr: failed to create pkt"); goto err_create_pkt; } rc = apr_send_pkt(dev->apr, (uint32_t *)apr); if (rc != apr->hdr.pkt_size) { 
dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_get_seq_hdr(void *sess, struct vidc_seq_hdr *seq_hdr) { struct q6_apr_cmd_session_get_sequence_header_packet *apr; int rc = 0; u8 packet[VIDC_IFACEQ_VAR_SMALL_PKT_SIZE]; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !seq_hdr || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } dev = session->device; apr = (struct q6_apr_cmd_session_get_sequence_header_packet *) packet; q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_SMALL_PKT_SIZE); rc = create_pkt_cmd_session_get_seq_hdr(&apr->pkt, (u32)session, seq_hdr); if (rc) { dprintk(VIDC_ERR, "Session get seq hdr: failed to create pkt"); goto err_create_pkt; } rc = apr_send_pkt(dev->apr, (uint32_t *)apr); if (rc != apr->hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_get_buf_req(void *sess) { struct q6_apr_cmd_session_get_property_packet apr; int rc = 0; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } dev = session->device; q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); rc = create_pkt_cmd_session_get_buf_req(&apr.pkt, (u32)session); if (rc) { dprintk(VIDC_ERR, "Session get buf req: failed to create pkt"); goto err_create_pkt; } rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_flush(void *sess, enum hal_flush flush_mode) { struct q6_apr_cmd_session_flush_packet apr; int rc = 0; struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return 
-EINVAL; } dev = session->device; q6_hfi_add_apr_hdr(dev, &apr.hdr, sizeof(apr)); rc = create_pkt_cmd_session_flush(&apr.pkt, (u32)session, flush_mode); if (rc) { dprintk(VIDC_ERR, "Session flush: failed to create pkt"); goto err_create_pkt; } rc = apr_send_pkt(dev->apr, (uint32_t *)&apr); if (rc != apr.hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_set_property(void *sess, enum hal_property ptype, void *pdata) { u8 packet[VIDC_IFACEQ_VAR_LARGE_PKT_SIZE]; struct q6_apr_cmd_session_set_property_packet *apr = (struct q6_apr_cmd_session_set_property_packet *) &packet; struct hal_session *session = sess; int rc = 0; struct q6_hfi_device *dev; if (!session || !pdata || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } dev = session->device; dprintk(VIDC_DBG, "in set_prop,with prop id: 0x%x", ptype); q6_hfi_add_apr_hdr(dev, &apr->hdr, VIDC_IFACEQ_VAR_LARGE_PKT_SIZE); rc = create_pkt_cmd_session_set_property(&apr->pkt, (u32)session, ptype, pdata); if (rc) { dprintk(VIDC_ERR, "set property: failed to create packet"); goto err_create_pkt; } rc = apr_send_pkt(dev->apr, (uint32_t *)apr); if (rc != apr->hdr.pkt_size) { dprintk(VIDC_ERR, "%s: apr_send_pkt failed rc: %d", __func__, rc); rc = -EBADE; } else rc = 0; err_create_pkt: return rc; } static int q6_hfi_session_get_property(void *sess, enum hal_property ptype, void *pdata) { struct hal_session *session = sess; struct q6_hfi_device *dev; if (!session || !pdata || !session->device) { dprintk(VIDC_ERR, "Invalid Params"); return -EINVAL; } dev = session->device; dprintk(VIDC_DBG, "IN func: , with property id: %d", ptype); switch (ptype) { case HAL_CONFIG_FRAME_RATE: break; case HAL_PARAM_UNCOMPRESSED_FORMAT_SELECT: break; case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO: break; case HAL_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: break; case HAL_PARAM_EXTRA_DATA_HEADER_CONFIG: 
break; case HAL_PARAM_FRAME_SIZE: break; case HAL_CONFIG_REALTIME: break; case HAL_PARAM_BUFFER_COUNT_ACTUAL: break; case HAL_PARAM_NAL_STREAM_FORMAT_SELECT: break; case HAL_PARAM_VDEC_OUTPUT_ORDER: break; case HAL_PARAM_VDEC_PICTURE_TYPE_DECODE: break; case HAL_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO: break; case HAL_CONFIG_VDEC_POST_LOOP_DEBLOCKER: break; case HAL_PARAM_VDEC_MULTI_STREAM: break; case HAL_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT: break; case HAL_PARAM_DIVX_FORMAT: break; case HAL_CONFIG_VDEC_MB_ERROR_MAP_REPORTING: break; case HAL_PARAM_VDEC_CONTINUE_DATA_TRANSFER: break; case HAL_CONFIG_VDEC_MB_ERROR_MAP: break; case HAL_CONFIG_VENC_REQUEST_IFRAME: break; case HAL_PARAM_VENC_MPEG4_SHORT_HEADER: break; case HAL_PARAM_VENC_MPEG4_AC_PREDICTION: break; case HAL_CONFIG_VENC_TARGET_BITRATE: break; case HAL_PARAM_PROFILE_LEVEL_CURRENT: break; case HAL_PARAM_VENC_H264_ENTROPY_CONTROL: break; case HAL_PARAM_VENC_RATE_CONTROL: break; case HAL_PARAM_VENC_MPEG4_TIME_RESOLUTION: break; case HAL_PARAM_VENC_MPEG4_HEADER_EXTENSION: break; case HAL_PARAM_VENC_H264_DEBLOCK_CONTROL: break; case HAL_PARAM_VENC_SESSION_QP: break; case HAL_CONFIG_VENC_INTRA_PERIOD: break; case HAL_CONFIG_VENC_IDR_PERIOD: break; case HAL_CONFIG_VPE_OPERATIONS: break; case HAL_PARAM_VENC_INTRA_REFRESH: break; case HAL_PARAM_VENC_MULTI_SLICE_CONTROL: break; case HAL_CONFIG_VPE_DEINTERLACE: break; case HAL_SYS_DEBUG_CONFIG: break; /*FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET*/ case HAL_CONFIG_BUFFER_REQUIREMENTS: case HAL_CONFIG_PRIORITY: case HAL_CONFIG_BATCH_INFO: case HAL_PARAM_METADATA_PASS_THROUGH: case HAL_SYS_IDLE_INDICATOR: case HAL_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: case HAL_PARAM_INTERLACE_FORMAT_SUPPORTED: case HAL_PARAM_CHROMA_SITE: case HAL_PARAM_PROPERTIES_SUPPORTED: case HAL_PARAM_PROFILE_LEVEL_SUPPORTED: case HAL_PARAM_CAPABILITY_SUPPORTED: case HAL_PARAM_NAL_STREAM_FORMAT_SUPPORTED: case HAL_PARAM_MULTI_VIEW_FORMAT: case HAL_PARAM_MAX_SEQUENCE_HEADER_SIZE: case 
HAL_PARAM_CODEC_SUPPORTED: case HAL_PARAM_VDEC_MULTI_VIEW_SELECT: case HAL_PARAM_VDEC_MB_QUANTIZATION: case HAL_PARAM_VDEC_NUM_CONCEALED_MB: case HAL_PARAM_VDEC_H264_ENTROPY_SWITCHING: case HAL_PARAM_VENC_SLICE_DELIVERY_MODE: case HAL_PARAM_VENC_MPEG4_DATA_PARTITIONING: case HAL_CONFIG_BUFFER_COUNT_ACTUAL: case HAL_CONFIG_VDEC_MULTI_STREAM: case HAL_PARAM_VENC_MULTI_SLICE_INFO: case HAL_CONFIG_VENC_TIMESTAMP_SCALE: case HAL_PARAM_VENC_LOW_LATENCY: default: dprintk(VIDC_INFO, "DEFAULT: Calling 0x%x", ptype); break; } return 0; } static int q6_hfi_unset_ocmem(void *dev) { (void)dev; /* Q6 does not support ocmem */ return -EINVAL; } static int q6_hfi_iommu_get_domain_partition(void *dev, u32 flags, u32 buffer_type, int *domain, int *partition) { (void)dev; dprintk(VIDC_ERR, "Not implemented: %s", __func__); return -ENOTSUPP; } static int q6_hfi_iommu_attach(struct q6_hfi_device *device) { int rc = 0; struct iommu_domain *domain; int i; struct iommu_set *iommu_group_set; struct iommu_group *group; struct iommu_info *iommu_map; if (!device || !device->res) { dprintk(VIDC_ERR, "Invalid parameter: %p", device); return -EINVAL; } iommu_group_set = &device->res->iommu_group_set; for (i = 0; i < iommu_group_set->count; i++) { iommu_map = &iommu_group_set->iommu_maps[i]; group = iommu_map->group; domain = msm_get_iommu_domain(iommu_map->domain); if (IS_ERR_OR_NULL(domain)) { dprintk(VIDC_ERR, "Failed to get domain: %s", iommu_map->name); rc = IS_ERR(domain) ? 
PTR_ERR(domain) : -EINVAL; break; } dprintk(VIDC_DBG, "Attaching domain(id:%d) %p to group %p", iommu_map->domain, domain, group); rc = iommu_attach_group(domain, group); if (rc) { dprintk(VIDC_ERR, "IOMMU attach failed: %s", iommu_map->name); break; } } if (i < iommu_group_set->count) { i--; for (; i >= 0; i--) { iommu_map = &iommu_group_set->iommu_maps[i]; group = iommu_map->group; domain = msm_get_iommu_domain(iommu_map->domain); if (group && domain) iommu_detach_group(domain, group); } } return rc; } static void q6_hfi_iommu_detach(struct q6_hfi_device *device) { struct iommu_group *group; struct iommu_domain *domain; struct iommu_set *iommu_group_set; struct iommu_info *iommu_map; int i; if (!device || !device->res) { dprintk(VIDC_ERR, "Invalid parameter: %p", device); return; } iommu_group_set = &device->res->iommu_group_set; for (i = 0; i < iommu_group_set->count; i++) { iommu_map = &iommu_group_set->iommu_maps[i]; group = iommu_map->group; domain = msm_get_iommu_domain(iommu_map->domain); if (group && domain) iommu_detach_group(domain, group); } } static int q6_hfi_load_fw(void *dev) { int rc = 0; struct q6_hfi_device *device = dev; if (!device) return -EINVAL; if (!device->resources.fw.cookie) device->resources.fw.cookie = subsystem_get("adsp"); if (IS_ERR_OR_NULL(device->resources.fw.cookie)) { dprintk(VIDC_ERR, "Failed to download firmware\n"); rc = -ENOMEM; goto fail_subsystem_get; } /*Set Q6 to loaded state*/ apr_set_q6_state(APR_SUBSYS_LOADED); device->apr = apr_register("ADSP", "VIDC", (apr_fn)q6_hfi_apr_callback, 0xFFFFFFFF, device); if (device->apr == NULL) { dprintk(VIDC_ERR, "Failed to register with QDSP6"); rc = -EINVAL; goto fail_apr_register; } rc = q6_hfi_iommu_attach(device); if (rc) { dprintk(VIDC_ERR, "Failed to attach iommu"); goto fail_iommu_attach; } return rc; fail_iommu_attach: apr_deregister(device->apr); device->apr = NULL; fail_apr_register: subsystem_put(device->resources.fw.cookie); device->resources.fw.cookie = NULL; 
fail_subsystem_get: return rc; } int q6_hfi_capability_check(u32 fourcc, u32 width, u32 *max_width, u32 *max_height) { int rc = 0; if (!max_width || !max_height) { dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__); return -EINVAL; } if (width > *max_width) { dprintk(VIDC_ERR, "Unsupported width = %u supported max width = %u\n", width, *max_width); rc = -ENOTSUPP; } return rc; } static void q6_hfi_unload_fw(void *hfi_device_data) { struct q6_hfi_device *device = hfi_device_data; if (!device) return; if (device->resources.fw.cookie) { q6_hfi_iommu_detach(device); subsystem_put(device->resources.fw.cookie); device->resources.fw.cookie = NULL; } if (device->apr) { if (apr_deregister(device->apr)) dprintk(VIDC_ERR, "Failed to deregister APR"); device->apr = NULL; } } static int q6_hfi_get_stride_scanline(int color_fmt, int width, int height, int *stride, int *scanlines) { *stride = VENUS_Y_STRIDE(color_fmt, width); *scanlines = VENUS_Y_SCANLINES(color_fmt, height); return 0; } static void q6_init_hfi_callbacks(struct hfi_device *hdev) { hdev->core_init = q6_hfi_core_init; hdev->core_release = q6_hfi_core_release; hdev->session_init = q6_hfi_session_init; hdev->session_end = q6_hfi_session_end; hdev->session_abort = q6_hfi_session_abort; hdev->session_clean = q6_hfi_session_clean; hdev->session_set_buffers = q6_hfi_session_set_buffers; hdev->session_release_buffers = q6_hfi_session_release_buffers; hdev->session_load_res = q6_hfi_session_load_res; hdev->session_release_res = q6_hfi_session_release_res; hdev->session_start = q6_hfi_session_start; hdev->session_stop = q6_hfi_session_stop; hdev->session_suspend = q6_hfi_session_suspend; hdev->session_resume = q6_hfi_session_resume; hdev->session_etb = q6_hfi_session_etb; hdev->session_ftb = q6_hfi_session_ftb; hdev->session_parse_seq_hdr = q6_hfi_session_parse_seq_hdr; hdev->session_get_seq_hdr = q6_hfi_session_get_seq_hdr; hdev->session_get_buf_req = q6_hfi_session_get_buf_req; hdev->session_flush = 
q6_hfi_session_flush; hdev->session_set_property = q6_hfi_session_set_property; hdev->session_get_property = q6_hfi_session_get_property; hdev->unset_ocmem = q6_hfi_unset_ocmem; hdev->iommu_get_domain_partition = q6_hfi_iommu_get_domain_partition; hdev->load_fw = q6_hfi_load_fw; hdev->capability_check = q6_hfi_capability_check; hdev->unload_fw = q6_hfi_unload_fw; hdev->get_stride_scanline = q6_hfi_get_stride_scanline; } int q6_hfi_initialize(struct hfi_device *hdev, u32 device_id, struct msm_vidc_platform_resources *res, hfi_cmd_response_callback callback) { int rc = 0; if (!hdev || !res || !callback) { dprintk(VIDC_ERR, "Invalid params: %p %p %p", hdev, res, callback); rc = -EINVAL; goto err_hfi_init; } hdev->hfi_device_data = q6_hfi_get_device(device_id, res, callback); if (IS_ERR_OR_NULL(hdev->hfi_device_data)) { rc = PTR_ERR(hdev->hfi_device_data); rc = !rc ? -EINVAL : rc; goto err_hfi_init; } q6_init_hfi_callbacks(hdev); err_hfi_init: return rc; }
gpl-2.0
bftg/gcc-5.3.0
gcc/testsuite/gcc.c-torture/execute/va-arg-pack-1.c
197
2623
/* __builtin_va_arg_pack () builtin tests. */ #include <stdarg.h> extern void abort (void); int v1 = 8; long int v2 = 3; void *v3 = (void *) &v2; struct A { char c[16]; } v4 = { "foo" }; long double v5 = 40; char seen[20]; int cnt; __attribute__ ((noinline)) int foo1 (int x, int y, ...) { int i; long int l; void *v; struct A a; long double ld; va_list ap; va_start (ap, y); if (x < 0 || x >= 20 || seen[x]) abort (); seen[x] = ++cnt; if (y != 6) abort (); i = va_arg (ap, int); if (i != 5) abort (); switch (x) { case 0: i = va_arg (ap, int); if (i != 9 || v1 != 9) abort (); a = va_arg (ap, struct A); if (__builtin_memcmp (a.c, v4.c, sizeof (a.c)) != 0) abort (); v = (void *) va_arg (ap, struct A *); if (v != (void *) &v4) abort (); l = va_arg (ap, long int); if (l != 3 || v2 != 4) abort (); break; case 1: ld = va_arg (ap, long double); if (ld != 41 || v5 != ld) abort (); i = va_arg (ap, int); if (i != 8) abort (); v = va_arg (ap, void *); if (v != &v2) abort (); break; case 2: break; default: abort (); } va_end (ap); return x; } __attribute__ ((noinline)) int foo2 (int x, int y, ...) { long long int ll; void *v; struct A a, b; long double ld; va_list ap; va_start (ap, y); if (x < 0 || x >= 20 || seen[x]) abort (); seen[x] = ++cnt | 64; if (y != 10) abort (); switch (x) { case 11: break; case 12: ld = va_arg (ap, long double); if (ld != 41 || v5 != 40) abort (); a = va_arg (ap, struct A); if (__builtin_memcmp (a.c, v4.c, sizeof (a.c)) != 0) abort (); b = va_arg (ap, struct A); if (__builtin_memcmp (b.c, v4.c, sizeof (b.c)) != 0) abort (); v = va_arg (ap, void *); if (v != &v2) abort (); ll = va_arg (ap, long long int); if (ll != 16LL) abort (); break; case 2: break; default: abort (); } va_end (ap); return x + 8; } __attribute__ ((noinline)) int foo3 (void) { return 6; } extern inline __attribute__ ((always_inline, gnu_inline)) int bar (int x, ...) 
{ if (x < 10) return foo1 (x, foo3 (), 5, __builtin_va_arg_pack ()); return foo2 (x, foo3 () + 4, __builtin_va_arg_pack ()); } int main (void) { if (bar (0, ++v1, v4, &v4, v2++) != 0) abort (); if (bar (1, ++v5, 8, v3) != 1) abort (); if (bar (2) != 2) abort (); if (bar (v1 + 2) != 19) abort (); if (bar (v1 + 3, v5--, v4, v4, v3, 16LL) != 20) abort (); return 0; }
gpl-2.0
ubuntustudio-kernel/ubuntu-saucy-lowlatency
arch/sparc/kernel/ds.c
453
26272
/* ds.c: Domain Services driver for Logical Domains * * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/kthread.h> #include <linux/reboot.h> #include <linux/cpu.h> #include <asm/hypervisor.h> #include <asm/ldc.h> #include <asm/vio.h> #include <asm/mdesc.h> #include <asm/head.h> #include <asm/irq.h> #include "kernel.h" #define DRV_MODULE_NAME "ds" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "1.0" #define DRV_MODULE_RELDATE "Jul 11, 2007" static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Sun LDOM domain services driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); struct ds_msg_tag { __u32 type; #define DS_INIT_REQ 0x00 #define DS_INIT_ACK 0x01 #define DS_INIT_NACK 0x02 #define DS_REG_REQ 0x03 #define DS_REG_ACK 0x04 #define DS_REG_NACK 0x05 #define DS_UNREG_REQ 0x06 #define DS_UNREG_ACK 0x07 #define DS_UNREG_NACK 0x08 #define DS_DATA 0x09 #define DS_NACK 0x0a __u32 len; }; /* Result codes */ #define DS_OK 0x00 #define DS_REG_VER_NACK 0x01 #define DS_REG_DUP 0x02 #define DS_INV_HDL 0x03 #define DS_TYPE_UNKNOWN 0x04 struct ds_version { __u16 major; __u16 minor; }; struct ds_ver_req { struct ds_msg_tag tag; struct ds_version ver; }; struct ds_ver_ack { struct ds_msg_tag tag; __u16 minor; }; struct ds_ver_nack { struct ds_msg_tag tag; __u16 major; }; struct ds_reg_req { struct ds_msg_tag tag; __u64 handle; __u16 major; __u16 minor; char svc_id[0]; }; struct ds_reg_ack { struct ds_msg_tag tag; __u64 handle; __u16 minor; }; struct ds_reg_nack { struct ds_msg_tag tag; __u64 handle; __u16 major; }; struct ds_unreg_req { struct ds_msg_tag tag; __u64 handle; }; struct ds_unreg_ack { struct 
ds_msg_tag tag; __u64 handle; }; struct ds_unreg_nack { struct ds_msg_tag tag; __u64 handle; }; struct ds_data { struct ds_msg_tag tag; __u64 handle; }; struct ds_data_nack { struct ds_msg_tag tag; __u64 handle; __u64 result; }; struct ds_info; struct ds_cap_state { __u64 handle; void (*data)(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); const char *service_id; u8 state; #define CAP_STATE_UNKNOWN 0x00 #define CAP_STATE_REG_SENT 0x01 #define CAP_STATE_REGISTERED 0x02 }; static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static void domain_shutdown_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static void domain_panic_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); #ifdef CONFIG_HOTPLUG_CPU static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); #endif static void ds_pri_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static void ds_var_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len); static struct ds_cap_state ds_states_template[] = { { .service_id = "md-update", .data = md_update_data, }, { .service_id = "domain-shutdown", .data = domain_shutdown_data, }, { .service_id = "domain-panic", .data = domain_panic_data, }, #ifdef CONFIG_HOTPLUG_CPU { .service_id = "dr-cpu", .data = dr_cpu_data, }, #endif { .service_id = "pri", .data = ds_pri_data, }, { .service_id = "var-config", .data = ds_var_data, }, { .service_id = "var-config-backup", .data = ds_var_data, }, }; static DEFINE_SPINLOCK(ds_lock); struct ds_info { struct ldc_channel *lp; u8 hs_state; #define DS_HS_START 0x01 #define DS_HS_DONE 0x02 u64 id; void *rcv_buf; int rcv_buf_len; struct ds_cap_state *ds_states; int num_ds_states; struct ds_info *next; }; static struct ds_info *ds_info_list; static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle) { unsigned int index = handle >> 32; if (index >= dp->num_ds_states) 
return NULL; return &dp->ds_states[index]; } static struct ds_cap_state *find_cap_by_string(struct ds_info *dp, const char *name) { int i; for (i = 0; i < dp->num_ds_states; i++) { if (strcmp(dp->ds_states[i].service_id, name)) continue; return &dp->ds_states[i]; } return NULL; } static int __ds_send(struct ldc_channel *lp, void *data, int len) { int err, limit = 1000; err = -EINVAL; while (limit-- > 0) { err = ldc_write(lp, data, len); if (!err || (err != -EAGAIN)) break; udelay(1); } return err; } static int ds_send(struct ldc_channel *lp, void *data, int len) { unsigned long flags; int err; spin_lock_irqsave(&ds_lock, flags); err = __ds_send(lp, data, len); spin_unlock_irqrestore(&ds_lock, flags); return err; } struct ds_md_update_req { __u64 req_num; }; struct ds_md_update_res { __u64 req_num; __u32 result; }; static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_md_update_req *rp; struct { struct ds_data data; struct ds_md_update_res res; } pkt; rp = (struct ds_md_update_req *) (dpkt + 1); printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id); mdesc_update(); memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; ds_send(lp, &pkt, sizeof(pkt)); } struct ds_shutdown_req { __u64 req_num; __u32 ms_delay; }; struct ds_shutdown_res { __u64 req_num; __u32 result; char reason[1]; }; static void domain_shutdown_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_shutdown_req *rp; struct { struct ds_data data; struct ds_shutdown_res res; } pkt; rp = (struct ds_shutdown_req *) (dpkt + 1); printk(KERN_ALERT "ds-%llu: Shutdown request from " "LDOM manager received.\n", dp->id); memset(&pkt, 0, sizeof(pkt)); 
pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; pkt.res.reason[0] = 0; ds_send(lp, &pkt, sizeof(pkt)); orderly_poweroff(true); } struct ds_panic_req { __u64 req_num; }; struct ds_panic_res { __u64 req_num; __u32 result; char reason[1]; }; static void domain_panic_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ldc_channel *lp = dp->lp; struct ds_data *dpkt = buf; struct ds_panic_req *rp; struct { struct ds_data data; struct ds_panic_res res; } pkt; rp = (struct ds_panic_req *) (dpkt + 1); printk(KERN_ALERT "ds-%llu: Panic request from " "LDOM manager received.\n", dp->id); memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); pkt.data.handle = cp->handle; pkt.res.req_num = rp->req_num; pkt.res.result = DS_OK; pkt.res.reason[0] = 0; ds_send(lp, &pkt, sizeof(pkt)); panic("PANIC requested by LDOM manager."); } #ifdef CONFIG_HOTPLUG_CPU struct dr_cpu_tag { __u64 req_num; __u32 type; #define DR_CPU_CONFIGURE 0x43 #define DR_CPU_UNCONFIGURE 0x55 #define DR_CPU_FORCE_UNCONFIGURE 0x46 #define DR_CPU_STATUS 0x53 /* Responses */ #define DR_CPU_OK 0x6f #define DR_CPU_ERROR 0x65 __u32 num_records; }; struct dr_cpu_resp_entry { __u32 cpu; __u32 result; #define DR_CPU_RES_OK 0x00 #define DR_CPU_RES_FAILURE 0x01 #define DR_CPU_RES_BLOCKED 0x02 #define DR_CPU_RES_CPU_NOT_RESPONDING 0x03 #define DR_CPU_RES_NOT_IN_MD 0x04 __u32 stat; #define DR_CPU_STAT_NOT_PRESENT 0x00 #define DR_CPU_STAT_UNCONFIGURED 0x01 #define DR_CPU_STAT_CONFIGURED 0x02 __u32 str_off; }; static void __dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) { struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); struct { struct ds_data data; struct dr_cpu_tag tag; } pkt; int msg_len; memset(&pkt, 0, sizeof(pkt)); pkt.data.tag.type = DS_DATA; pkt.data.handle = 
cp->handle; pkt.tag.req_num = tag->req_num; pkt.tag.type = DR_CPU_ERROR; pkt.tag.num_records = 0; msg_len = (sizeof(struct ds_data) + sizeof(struct dr_cpu_tag)); pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag); __ds_send(dp->lp, &pkt, msg_len); } static void dr_cpu_send_error(struct ds_info *dp, struct ds_cap_state *cp, struct ds_data *data) { unsigned long flags; spin_lock_irqsave(&ds_lock, flags); __dr_cpu_send_error(dp, cp, data); spin_unlock_irqrestore(&ds_lock, flags); } #define CPU_SENTINEL 0xffffffff static void purge_dups(u32 *list, u32 num_ents) { unsigned int i; for (i = 0; i < num_ents; i++) { u32 cpu = list[i]; unsigned int j; if (cpu == CPU_SENTINEL) continue; for (j = i + 1; j < num_ents; j++) { if (list[j] == cpu) list[j] = CPU_SENTINEL; } } } static int dr_cpu_size_response(int ncpus) { return (sizeof(struct ds_data) + sizeof(struct dr_cpu_tag) + (sizeof(struct dr_cpu_resp_entry) * ncpus)); } static void dr_cpu_init_response(struct ds_data *resp, u64 req_num, u64 handle, int resp_len, int ncpus, cpumask_t *mask, u32 default_stat) { struct dr_cpu_resp_entry *ent; struct dr_cpu_tag *tag; int i, cpu; tag = (struct dr_cpu_tag *) (resp + 1); ent = (struct dr_cpu_resp_entry *) (tag + 1); resp->tag.type = DS_DATA; resp->tag.len = resp_len - sizeof(struct ds_msg_tag); resp->handle = handle; tag->req_num = req_num; tag->type = DR_CPU_OK; tag->num_records = ncpus; i = 0; for_each_cpu(cpu, mask) { ent[i].cpu = cpu; ent[i].result = DR_CPU_RES_OK; ent[i].stat = default_stat; i++; } BUG_ON(i != ncpus); } static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, u32 res, u32 stat) { struct dr_cpu_resp_entry *ent; struct dr_cpu_tag *tag; int i; tag = (struct dr_cpu_tag *) (resp + 1); ent = (struct dr_cpu_resp_entry *) (tag + 1); for (i = 0; i < ncpus; i++) { if (ent[i].cpu != cpu) continue; ent[i].result = res; ent[i].stat = stat; break; } } static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp, u64 req_num, cpumask_t *mask) { 
struct ds_data *resp; int resp_len, ncpus, cpu; unsigned long flags; ncpus = cpumask_weight(mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; dr_cpu_init_response(resp, req_num, cp->handle, resp_len, ncpus, mask, DR_CPU_STAT_CONFIGURED); mdesc_populate_present_mask(mask); mdesc_fill_in_cpu_data(mask); for_each_cpu(cpu, mask) { int err; printk(KERN_INFO "ds-%llu: Starting cpu %d...\n", dp->id, cpu); err = cpu_up(cpu); if (err) { __u32 res = DR_CPU_RES_FAILURE; __u32 stat = DR_CPU_STAT_UNCONFIGURED; if (!cpu_present(cpu)) { /* CPU not present in MD */ res = DR_CPU_RES_NOT_IN_MD; stat = DR_CPU_STAT_NOT_PRESENT; } else if (err == -ENODEV) { /* CPU did not call in successfully */ res = DR_CPU_RES_CPU_NOT_RESPONDING; } printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n", dp->id, err); dr_cpu_mark(resp, cpu, ncpus, res, stat); } } spin_lock_irqsave(&ds_lock, flags); __ds_send(dp->lp, resp, resp_len); spin_unlock_irqrestore(&ds_lock, flags); kfree(resp); /* Redistribute IRQs, taking into account the new cpus. 
*/ fixup_irqs(); return 0; } static int dr_cpu_unconfigure(struct ds_info *dp, struct ds_cap_state *cp, u64 req_num, cpumask_t *mask) { struct ds_data *resp; int resp_len, ncpus, cpu; unsigned long flags; ncpus = cpumask_weight(mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; dr_cpu_init_response(resp, req_num, cp->handle, resp_len, ncpus, mask, DR_CPU_STAT_UNCONFIGURED); for_each_cpu(cpu, mask) { int err; printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n", dp->id, cpu); err = cpu_down(cpu); if (err) dr_cpu_mark(resp, cpu, ncpus, DR_CPU_RES_FAILURE, DR_CPU_STAT_CONFIGURED); } spin_lock_irqsave(&ds_lock, flags); __ds_send(dp->lp, resp, resp_len); spin_unlock_irqrestore(&ds_lock, flags); kfree(resp); return 0; } static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *data = buf; struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); u32 *cpu_list = (u32 *) (tag + 1); u64 req_num = tag->req_num; cpumask_t mask; unsigned int i; int err; switch (tag->type) { case DR_CPU_CONFIGURE: case DR_CPU_UNCONFIGURE: case DR_CPU_FORCE_UNCONFIGURE: break; default: dr_cpu_send_error(dp, cp, data); return; } purge_dups(cpu_list, tag->num_records); cpumask_clear(&mask); for (i = 0; i < tag->num_records; i++) { if (cpu_list[i] == CPU_SENTINEL) continue; if (cpu_list[i] < nr_cpu_ids) cpumask_set_cpu(cpu_list[i], &mask); } if (tag->type == DR_CPU_CONFIGURE) err = dr_cpu_configure(dp, cp, req_num, &mask); else err = dr_cpu_unconfigure(dp, cp, req_num, &mask); if (err) dr_cpu_send_error(dp, cp, data); } #endif /* CONFIG_HOTPLUG_CPU */ struct ds_pri_msg { __u64 req_num; __u64 type; #define DS_PRI_REQUEST 0x00 #define DS_PRI_DATA 0x01 #define DS_PRI_UPDATE 0x02 }; static void ds_pri_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *dpkt = buf; struct ds_pri_msg *rp; rp = (struct ds_pri_msg *) (dpkt + 1); printk(KERN_INFO "ds-%llu: PRI 
REQ [%llx:%llx], len=%d\n", dp->id, rp->req_num, rp->type, len); } struct ds_var_hdr { __u32 type; #define DS_VAR_SET_REQ 0x00 #define DS_VAR_DELETE_REQ 0x01 #define DS_VAR_SET_RESP 0x02 #define DS_VAR_DELETE_RESP 0x03 }; struct ds_var_set_msg { struct ds_var_hdr hdr; char name_and_value[0]; }; struct ds_var_delete_msg { struct ds_var_hdr hdr; char name[0]; }; struct ds_var_resp { struct ds_var_hdr hdr; __u32 result; #define DS_VAR_SUCCESS 0x00 #define DS_VAR_NO_SPACE 0x01 #define DS_VAR_INVALID_VAR 0x02 #define DS_VAR_INVALID_VAL 0x03 #define DS_VAR_NOT_PRESENT 0x04 }; static DEFINE_MUTEX(ds_var_mutex); static int ds_var_doorbell; static int ds_var_response; static void ds_var_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, int len) { struct ds_data *dpkt = buf; struct ds_var_resp *rp; rp = (struct ds_var_resp *) (dpkt + 1); if (rp->hdr.type != DS_VAR_SET_RESP && rp->hdr.type != DS_VAR_DELETE_RESP) return; ds_var_response = rp->result; wmb(); ds_var_doorbell = 1; } void ldom_set_var(const char *var, const char *value) { struct ds_cap_state *cp; struct ds_info *dp; unsigned long flags; spin_lock_irqsave(&ds_lock, flags); cp = NULL; for (dp = ds_info_list; dp; dp = dp->next) { struct ds_cap_state *tmp; tmp = find_cap_by_string(dp, "var-config"); if (tmp && tmp->state == CAP_STATE_REGISTERED) { cp = tmp; break; } } if (!cp) { for (dp = ds_info_list; dp; dp = dp->next) { struct ds_cap_state *tmp; tmp = find_cap_by_string(dp, "var-config-backup"); if (tmp && tmp->state == CAP_STATE_REGISTERED) { cp = tmp; break; } } } spin_unlock_irqrestore(&ds_lock, flags); if (cp) { union { struct { struct ds_data data; struct ds_var_set_msg msg; } header; char all[512]; } pkt; char *base, *p; int msg_len, loops; if (strlen(var) + strlen(value) + 2 > sizeof(pkt) - sizeof(pkt.header)) { printk(KERN_ERR PFX "contents length: %zu, which more than max: %lu," "so could not set (%s) variable to (%s).\n", strlen(var) + strlen(value) + 2, sizeof(pkt) - sizeof(pkt.header), var, 
value); return; } memset(&pkt, 0, sizeof(pkt)); pkt.header.data.tag.type = DS_DATA; pkt.header.data.handle = cp->handle; pkt.header.msg.hdr.type = DS_VAR_SET_REQ; base = p = &pkt.header.msg.name_and_value[0]; strcpy(p, var); p += strlen(var) + 1; strcpy(p, value); p += strlen(value) + 1; msg_len = (sizeof(struct ds_data) + sizeof(struct ds_var_set_msg) + (p - base)); msg_len = (msg_len + 3) & ~3; pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag); mutex_lock(&ds_var_mutex); spin_lock_irqsave(&ds_lock, flags); ds_var_doorbell = 0; ds_var_response = -1; __ds_send(dp->lp, &pkt, msg_len); spin_unlock_irqrestore(&ds_lock, flags); loops = 1000; while (ds_var_doorbell == 0) { if (loops-- < 0) break; barrier(); udelay(100); } mutex_unlock(&ds_var_mutex); if (ds_var_doorbell == 0 || ds_var_response != DS_VAR_SUCCESS) printk(KERN_ERR "ds-%llu: var-config [%s:%s] " "failed, response(%d).\n", dp->id, var, value, ds_var_response); } else { printk(KERN_ERR PFX "var-config not registered so " "could not set (%s) variable to (%s).\n", var, value); } } static char full_boot_str[256] __attribute__((aligned(32))); static int reboot_data_supported; void ldom_reboot(const char *boot_command) { /* Don't bother with any of this if the boot_command * is empty. 
*/ if (boot_command && strlen(boot_command)) { unsigned long len; snprintf(full_boot_str, sizeof(full_boot_str), "boot %s", boot_command); len = strlen(full_boot_str); if (reboot_data_supported) { unsigned long ra = kimage_addr_to_ra(full_boot_str); unsigned long hv_ret; hv_ret = sun4v_reboot_data_set(ra, len); if (hv_ret != HV_EOK) pr_err("SUN4V: Unable to set reboot data " "hv_ret=%lu\n", hv_ret); } else { ldom_set_var("reboot-command", full_boot_str); } } sun4v_mach_sir(); } void ldom_power_off(void) { sun4v_mach_exit(0); } static void ds_conn_reset(struct ds_info *dp) { printk(KERN_ERR "ds-%llu: ds_conn_reset() from %pf\n", dp->id, __builtin_return_address(0)); } static int register_services(struct ds_info *dp) { struct ldc_channel *lp = dp->lp; int i; for (i = 0; i < dp->num_ds_states; i++) { struct { struct ds_reg_req req; u8 id_buf[256]; } pbuf; struct ds_cap_state *cp = &dp->ds_states[i]; int err, msg_len; u64 new_count; if (cp->state == CAP_STATE_REGISTERED) continue; new_count = sched_clock() & 0xffffffff; cp->handle = ((u64) i << 32) | new_count; msg_len = (sizeof(struct ds_reg_req) + strlen(cp->service_id)); memset(&pbuf, 0, sizeof(pbuf)); pbuf.req.tag.type = DS_REG_REQ; pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag)); pbuf.req.handle = cp->handle; pbuf.req.major = 1; pbuf.req.minor = 0; strcpy(pbuf.req.svc_id, cp->service_id); err = __ds_send(lp, &pbuf, msg_len); if (err > 0) cp->state = CAP_STATE_REG_SENT; } return 0; } static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt) { if (dp->hs_state == DS_HS_START) { if (pkt->type != DS_INIT_ACK) goto conn_reset; dp->hs_state = DS_HS_DONE; return register_services(dp); } if (dp->hs_state != DS_HS_DONE) goto conn_reset; if (pkt->type == DS_REG_ACK) { struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt; struct ds_cap_state *cp = find_cap(dp, ap->handle); if (!cp) { printk(KERN_ERR "ds-%llu: REG ACK for unknown " "handle %llx\n", dp->id, ap->handle); return 0; } printk(KERN_INFO "ds-%llu: 
Registered %s service.\n", dp->id, cp->service_id); cp->state = CAP_STATE_REGISTERED; } else if (pkt->type == DS_REG_NACK) { struct ds_reg_nack *np = (struct ds_reg_nack *) pkt; struct ds_cap_state *cp = find_cap(dp, np->handle); if (!cp) { printk(KERN_ERR "ds-%llu: REG NACK for " "unknown handle %llx\n", dp->id, np->handle); return 0; } cp->state = CAP_STATE_UNKNOWN; } return 0; conn_reset: ds_conn_reset(dp); return -ECONNRESET; } static void __send_ds_nack(struct ds_info *dp, u64 handle) { struct ds_data_nack nack = { .tag = { .type = DS_NACK, .len = (sizeof(struct ds_data_nack) - sizeof(struct ds_msg_tag)), }, .handle = handle, .result = DS_INV_HDL, }; __ds_send(dp->lp, &nack, sizeof(nack)); } static LIST_HEAD(ds_work_list); static DECLARE_WAIT_QUEUE_HEAD(ds_wait); struct ds_queue_entry { struct list_head list; struct ds_info *dp; int req_len; int __pad; u64 req[0]; }; static void process_ds_work(void) { struct ds_queue_entry *qp, *tmp; unsigned long flags; LIST_HEAD(todo); spin_lock_irqsave(&ds_lock, flags); list_splice_init(&ds_work_list, &todo); spin_unlock_irqrestore(&ds_lock, flags); list_for_each_entry_safe(qp, tmp, &todo, list) { struct ds_data *dpkt = (struct ds_data *) qp->req; struct ds_info *dp = qp->dp; struct ds_cap_state *cp = find_cap(dp, dpkt->handle); int req_len = qp->req_len; if (!cp) { printk(KERN_ERR "ds-%llu: Data for unknown " "handle %llu\n", dp->id, dpkt->handle); spin_lock_irqsave(&ds_lock, flags); __send_ds_nack(dp, dpkt->handle); spin_unlock_irqrestore(&ds_lock, flags); } else { cp->data(dp, cp, dpkt, req_len); } list_del(&qp->list); kfree(qp); } } static int ds_thread(void *__unused) { DEFINE_WAIT(wait); while (1) { prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE); if (list_empty(&ds_work_list)) schedule(); finish_wait(&ds_wait, &wait); if (kthread_should_stop()) break; process_ds_work(); } return 0; } static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len) { struct ds_data *dpkt = (struct ds_data *) pkt; struct 
ds_queue_entry *qp; qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); if (!qp) { __send_ds_nack(dp, dpkt->handle); } else { qp->dp = dp; memcpy(&qp->req, pkt, len); list_add_tail(&qp->list, &ds_work_list); wake_up(&ds_wait); } return 0; } static void ds_up(struct ds_info *dp) { struct ldc_channel *lp = dp->lp; struct ds_ver_req req; int err; req.tag.type = DS_INIT_REQ; req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag); req.ver.major = 1; req.ver.minor = 0; err = __ds_send(lp, &req, sizeof(req)); if (err > 0) dp->hs_state = DS_HS_START; } static void ds_reset(struct ds_info *dp) { int i; dp->hs_state = 0; for (i = 0; i < dp->num_ds_states; i++) { struct ds_cap_state *cp = &dp->ds_states[i]; cp->state = CAP_STATE_UNKNOWN; } } static void ds_event(void *arg, int event) { struct ds_info *dp = arg; struct ldc_channel *lp = dp->lp; unsigned long flags; int err; spin_lock_irqsave(&ds_lock, flags); if (event == LDC_EVENT_UP) { ds_up(dp); spin_unlock_irqrestore(&ds_lock, flags); return; } if (event == LDC_EVENT_RESET) { ds_reset(dp); spin_unlock_irqrestore(&ds_lock, flags); return; } if (event != LDC_EVENT_DATA_READY) { printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n", dp->id, event); spin_unlock_irqrestore(&ds_lock, flags); return; } err = 0; while (1) { struct ds_msg_tag *tag; err = ldc_read(lp, dp->rcv_buf, sizeof(*tag)); if (unlikely(err < 0)) { if (err == -ECONNRESET) ds_conn_reset(dp); break; } if (err == 0) break; tag = dp->rcv_buf; err = ldc_read(lp, tag + 1, tag->len); if (unlikely(err < 0)) { if (err == -ECONNRESET) ds_conn_reset(dp); break; } if (err < tag->len) break; if (tag->type < DS_DATA) err = ds_handshake(dp, dp->rcv_buf); else err = ds_data(dp, dp->rcv_buf, sizeof(*tag) + err); if (err == -ECONNRESET) break; } spin_unlock_irqrestore(&ds_lock, flags); } static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id) { static int ds_version_printed; struct ldc_channel_config ds_cfg = { .event = ds_event, .mtu = 4096, .mode 
= LDC_MODE_STREAM, }; struct mdesc_handle *hp; struct ldc_channel *lp; struct ds_info *dp; const u64 *val; int err, i; if (ds_version_printed++ == 0) printk(KERN_INFO "%s", version); dp = kzalloc(sizeof(*dp), GFP_KERNEL); err = -ENOMEM; if (!dp) goto out_err; hp = mdesc_grab(); val = mdesc_get_property(hp, vdev->mp, "id", NULL); if (val) dp->id = *val; mdesc_release(hp); dp->rcv_buf = kzalloc(4096, GFP_KERNEL); if (!dp->rcv_buf) goto out_free_dp; dp->rcv_buf_len = 4096; dp->ds_states = kmemdup(ds_states_template, sizeof(ds_states_template), GFP_KERNEL); if (!dp->ds_states) goto out_free_rcv_buf; dp->num_ds_states = ARRAY_SIZE(ds_states_template); for (i = 0; i < dp->num_ds_states; i++) dp->ds_states[i].handle = ((u64)i << 32); ds_cfg.tx_irq = vdev->tx_irq; ds_cfg.rx_irq = vdev->rx_irq; lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out_free_ds_states; } dp->lp = lp; err = ldc_bind(lp, "DS"); if (err) goto out_free_ldc; spin_lock_irq(&ds_lock); dp->next = ds_info_list; ds_info_list = dp; spin_unlock_irq(&ds_lock); return err; out_free_ldc: ldc_free(dp->lp); out_free_ds_states: kfree(dp->ds_states); out_free_rcv_buf: kfree(dp->rcv_buf); out_free_dp: kfree(dp); out_err: return err; } static int ds_remove(struct vio_dev *vdev) { return 0; } static const struct vio_device_id ds_match[] = { { .type = "domain-services-port", }, {}, }; static struct vio_driver ds_driver = { .id_table = ds_match, .probe = ds_probe, .remove = ds_remove, .name = "ds", }; static int __init ds_init(void) { unsigned long hv_ret, major, minor; if (tlb_type == hypervisor) { hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); if (hv_ret == HV_EOK) { pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", major, minor); reboot_data_supported = 1; } } kthread_run(ds_thread, NULL, "kldomd"); return vio_register_driver(&ds_driver); } fs_initcall(ds_init);
gpl-2.0
ResurrectionRemix-Devices/android_kernel_oneplus_msm8974
tools/usb/ffs-test.c
709
12233
/* * ffs-test.c.c -- user mode filesystem api for usb composite function * * Copyright (C) 2010 Samsung Electronics * Author: Michal Nazarewicz <mina86@mina86.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* $(CROSS_COMPILE)cc -Wall -Wextra -g -o ffs-test ffs-test.c -lpthread */ #define _BSD_SOURCE /* for endian.h */ #include <endian.h> #include <errno.h> #include <fcntl.h> #include <pthread.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/ioctl.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <tools/le_byteshift.h> #include "../../include/linux/usb/functionfs.h" /******************** Little Endian Handling ********************************/ #define cpu_to_le16(x) htole16(x) #define cpu_to_le32(x) htole32(x) #define le32_to_cpu(x) le32toh(x) #define le16_to_cpu(x) le16toh(x) /******************** Messages and Errors ***********************************/ static const char argv0[] = "ffs-test"; static unsigned verbosity = 7; static void _msg(unsigned level, const char *fmt, ...) 
{ if (level < 2) level = 2; else if (level > 7) level = 7; if (level <= verbosity) { static const char levels[8][6] = { [2] = "crit:", [3] = "err: ", [4] = "warn:", [5] = "note:", [6] = "info:", [7] = "dbg: " }; int _errno = errno; va_list ap; fprintf(stderr, "%s: %s ", argv0, levels[level]); va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); if (fmt[strlen(fmt) - 1] != '\n') { char buffer[128]; strerror_r(_errno, buffer, sizeof buffer); fprintf(stderr, ": (-%d) %s\n", _errno, buffer); } fflush(stderr); } } #define die(...) (_msg(2, __VA_ARGS__), exit(1)) #define err(...) _msg(3, __VA_ARGS__) #define warn(...) _msg(4, __VA_ARGS__) #define note(...) _msg(5, __VA_ARGS__) #define info(...) _msg(6, __VA_ARGS__) #define debug(...) _msg(7, __VA_ARGS__) #define die_on(cond, ...) do { \ if (cond) \ die(__VA_ARGS__); \ } while (0) /******************** Descriptors and Strings *******************************/ static const struct { struct usb_functionfs_descs_head header; struct { struct usb_interface_descriptor intf; struct usb_endpoint_descriptor_no_audio sink; struct usb_endpoint_descriptor_no_audio source; } __attribute__((packed)) fs_descs, hs_descs; } __attribute__((packed)) descriptors = { .header = { .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC), .length = cpu_to_le32(sizeof descriptors), .fs_count = cpu_to_le32(3), .hs_count = cpu_to_le32(3), }, .fs_descs = { .intf = { .bLength = sizeof descriptors.fs_descs.intf, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .iInterface = 1, }, .sink = { .bLength = sizeof descriptors.fs_descs.sink, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 1 | USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, /* .wMaxPacketSize = autoconfiguration (kernel) */ }, .source = { .bLength = sizeof descriptors.fs_descs.source, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 2 | USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, /* .wMaxPacketSize = autoconfiguration 
(kernel) */ }, }, .hs_descs = { .intf = { .bLength = sizeof descriptors.fs_descs.intf, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .iInterface = 1, }, .sink = { .bLength = sizeof descriptors.hs_descs.sink, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 1 | USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }, .source = { .bLength = sizeof descriptors.hs_descs.source, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = 2 | USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), .bInterval = 1, /* NAK every 1 uframe */ }, }, }; #define STR_INTERFACE_ "Source/Sink" static const struct { struct usb_functionfs_strings_head header; struct { __le16 code; const char str1[sizeof STR_INTERFACE_]; } __attribute__((packed)) lang0; } __attribute__((packed)) strings = { .header = { .magic = cpu_to_le32(FUNCTIONFS_STRINGS_MAGIC), .length = cpu_to_le32(sizeof strings), .str_count = cpu_to_le32(1), .lang_count = cpu_to_le32(1), }, .lang0 = { cpu_to_le16(0x0409), /* en-us */ STR_INTERFACE_, }, }; #define STR_INTERFACE strings.lang0.str1 /******************** Files and Threads Handling ****************************/ struct thread; static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes); static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes); static ssize_t ep0_consume(struct thread *t, const void *buf, size_t nbytes); static ssize_t fill_in_buf(struct thread *t, void *buf, size_t nbytes); static ssize_t empty_out_buf(struct thread *t, const void *buf, size_t nbytes); static struct thread { const char *const filename; size_t buf_size; ssize_t (*in)(struct thread *, void *, size_t); const char *const in_name; ssize_t (*out)(struct thread *, const void *, size_t); const char *const out_name; int fd; pthread_t id; void *buf; ssize_t status; } threads[] = { { "ep0", 4 * sizeof(struct usb_functionfs_event), read_wrap, 
NULL, ep0_consume, "<consume>", 0, 0, NULL, 0 }, { "ep1", 8 * 1024, fill_in_buf, "<in>", write_wrap, NULL, 0, 0, NULL, 0 }, { "ep2", 8 * 1024, read_wrap, NULL, empty_out_buf, "<out>", 0, 0, NULL, 0 }, }; static void init_thread(struct thread *t) { t->buf = malloc(t->buf_size); die_on(!t->buf, "malloc"); t->fd = open(t->filename, O_RDWR); die_on(t->fd < 0, "%s", t->filename); } static void cleanup_thread(void *arg) { struct thread *t = arg; int ret, fd; fd = t->fd; if (t->fd < 0) return; t->fd = -1; /* test the FIFO ioctls (non-ep0 code paths) */ if (t != threads) { ret = ioctl(fd, FUNCTIONFS_FIFO_STATUS); if (ret < 0) { /* ENODEV reported after disconnect */ if (errno != ENODEV) err("%s: get fifo status", t->filename); } else if (ret) { warn("%s: unclaimed = %d\n", t->filename, ret); if (ioctl(fd, FUNCTIONFS_FIFO_FLUSH) < 0) err("%s: fifo flush", t->filename); } } if (close(fd) < 0) err("%s: close", t->filename); free(t->buf); t->buf = NULL; } static void *start_thread_helper(void *arg) { const char *name, *op, *in_name, *out_name; struct thread *t = arg; ssize_t ret; info("%s: starts\n", t->filename); in_name = t->in_name ? t->in_name : t->filename; out_name = t->out_name ? 
t->out_name : t->filename; pthread_cleanup_push(cleanup_thread, arg); for (;;) { pthread_testcancel(); ret = t->in(t, t->buf, t->buf_size); if (ret > 0) { ret = t->out(t, t->buf, ret); name = out_name; op = "write"; } else { name = in_name; op = "read"; } if (ret > 0) { /* nop */ } else if (!ret) { debug("%s: %s: EOF", name, op); break; } else if (errno == EINTR || errno == EAGAIN) { debug("%s: %s", name, op); } else { warn("%s: %s", name, op); break; } } pthread_cleanup_pop(1); t->status = ret; info("%s: ends\n", t->filename); return NULL; } static void start_thread(struct thread *t) { debug("%s: starting\n", t->filename); die_on(pthread_create(&t->id, NULL, start_thread_helper, t) < 0, "pthread_create(%s)", t->filename); } static void join_thread(struct thread *t) { int ret = pthread_join(t->id, NULL); if (ret < 0) err("%s: joining thread", t->filename); else debug("%s: joined\n", t->filename); } static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes) { return read(t->fd, buf, nbytes); } static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes) { return write(t->fd, buf, nbytes); } /******************** Empty/Fill buffer routines ****************************/ /* 0 -- stream of zeros, 1 -- i % 63, 2 -- pipe */ enum pattern { PAT_ZERO, PAT_SEQ, PAT_PIPE }; static enum pattern pattern; static ssize_t fill_in_buf(struct thread *ignore, void *buf, size_t nbytes) { size_t i; __u8 *p; (void)ignore; switch (pattern) { case PAT_ZERO: memset(buf, 0, nbytes); break; case PAT_SEQ: for (p = buf, i = 0; i < nbytes; ++i, ++p) *p = i % 63; break; case PAT_PIPE: return fread(buf, 1, nbytes, stdin); } return nbytes; } static ssize_t empty_out_buf(struct thread *ignore, const void *buf, size_t nbytes) { const __u8 *p; __u8 expected; ssize_t ret; size_t len; (void)ignore; switch (pattern) { case PAT_ZERO: expected = 0; for (p = buf, len = 0; len < nbytes; ++p, ++len) if (*p) goto invalid; break; case PAT_SEQ: for (p = buf, len = 0; len < nbytes; ++p, 
++len) if (*p != len % 63) { expected = len % 63; goto invalid; } break; case PAT_PIPE: ret = fwrite(buf, nbytes, 1, stdout); if (ret > 0) fflush(stdout); break; invalid: err("bad OUT byte %zd, expected %02x got %02x\n", len, expected, *p); for (p = buf, len = 0; len < nbytes; ++p, ++len) { if (0 == (len % 32)) fprintf(stderr, "%4zd:", len); fprintf(stderr, " %02x", *p); if (31 == (len % 32)) fprintf(stderr, "\n"); } fflush(stderr); errno = EILSEQ; return -1; } return len; } /******************** Endpoints routines ************************************/ static void handle_setup(const struct usb_ctrlrequest *setup) { printf("bRequestType = %d\n", setup->bRequestType); printf("bRequest = %d\n", setup->bRequest); printf("wValue = %d\n", le16_to_cpu(setup->wValue)); printf("wIndex = %d\n", le16_to_cpu(setup->wIndex)); printf("wLength = %d\n", le16_to_cpu(setup->wLength)); } static ssize_t ep0_consume(struct thread *ignore, const void *buf, size_t nbytes) { static const char *const names[] = { [FUNCTIONFS_BIND] = "BIND", [FUNCTIONFS_UNBIND] = "UNBIND", [FUNCTIONFS_ENABLE] = "ENABLE", [FUNCTIONFS_DISABLE] = "DISABLE", [FUNCTIONFS_SETUP] = "SETUP", [FUNCTIONFS_SUSPEND] = "SUSPEND", [FUNCTIONFS_RESUME] = "RESUME", }; const struct usb_functionfs_event *event = buf; size_t n; (void)ignore; for (n = nbytes / sizeof *event; n; --n, ++event) switch (event->type) { case FUNCTIONFS_BIND: case FUNCTIONFS_UNBIND: case FUNCTIONFS_ENABLE: case FUNCTIONFS_DISABLE: case FUNCTIONFS_SETUP: case FUNCTIONFS_SUSPEND: case FUNCTIONFS_RESUME: printf("Event %s\n", names[event->type]); if (event->type == FUNCTIONFS_SETUP) handle_setup(&event->u.setup); break; default: printf("Event %03u (unknown)\n", event->type); } return nbytes; } static void ep0_init(struct thread *t) { ssize_t ret; info("%s: writing descriptors\n", t->filename); ret = write(t->fd, &descriptors, sizeof descriptors); die_on(ret < 0, "%s: write: descriptors", t->filename); info("%s: writing strings\n", t->filename); ret = 
write(t->fd, &strings, sizeof strings); die_on(ret < 0, "%s: write: strings", t->filename); } /******************** Main **************************************************/ int main(void) { unsigned i; /* XXX TODO: Argument parsing missing */ init_thread(threads); ep0_init(threads); for (i = 1; i < sizeof threads / sizeof *threads; ++i) init_thread(threads + i); for (i = 1; i < sizeof threads / sizeof *threads; ++i) start_thread(threads + i); start_thread_helper(threads); for (i = 1; i < sizeof threads / sizeof *threads; ++i) join_thread(threads + i); return 0; }
gpl-2.0
longqiany/linux
sound/isa/wavefront/wavefront.c
1221
19002
/* * ALSA card-level driver for Turtle Beach Wavefront cards * (Maui,Tropez,Tropez+) * * Copyright (c) 1997-1999 by Paul Barton-Davis <pbd@op.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/pnp.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/opl3.h> #include <sound/wss.h> #include <sound/snd_wavefront.h> MODULE_AUTHOR("Paul Barton-Davis <pbd@op.net>"); MODULE_DESCRIPTION("Turtle Beach Wavefront"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Turtle Beach,Maui/Tropez/Tropez+}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 1}; #endif static long cs4232_pcm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int cs4232_pcm_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,11,12,15 */ static long cs4232_mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int cs4232_mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 9,11,12,15 */ static long ics2115_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int ics2115_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 2,9,11,12,15 */ static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ static bool use_cs4232_midi[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for WaveFront soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for WaveFront soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable WaveFront soundcard."); #ifdef CONFIG_PNP module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "ISA PnP detection for WaveFront soundcards."); #endif module_param_array(cs4232_pcm_port, long, NULL, 0444); MODULE_PARM_DESC(cs4232_pcm_port, "Port # for CS4232 PCM interface."); module_param_array(cs4232_pcm_irq, int, NULL, 0444); MODULE_PARM_DESC(cs4232_pcm_irq, "IRQ # for CS4232 PCM interface."); module_param_array(dma1, int, NULL, 0444); MODULE_PARM_DESC(dma1, "DMA1 # for CS4232 PCM interface."); module_param_array(dma2, int, NULL, 0444); MODULE_PARM_DESC(dma2, "DMA2 # for CS4232 PCM interface."); module_param_array(cs4232_mpu_port, long, NULL, 0444); MODULE_PARM_DESC(cs4232_mpu_port, "port # for CS4232 MPU-401 interface."); module_param_array(cs4232_mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(cs4232_mpu_irq, "IRQ # for CS4232 MPU-401 interface."); module_param_array(ics2115_irq, int, NULL, 0444); MODULE_PARM_DESC(ics2115_irq, 
"IRQ # for ICS2115."); module_param_array(ics2115_port, long, NULL, 0444); MODULE_PARM_DESC(ics2115_port, "Port # for ICS2115."); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port #."); module_param_array(use_cs4232_midi, bool, NULL, 0444); MODULE_PARM_DESC(use_cs4232_midi, "Use CS4232 MPU-401 interface (inaccessibly located inside your computer)"); #ifdef CONFIG_PNP static int isa_registered; static int pnp_registered; static struct pnp_card_device_id snd_wavefront_pnpids[] = { /* Tropez */ { .id = "CSC7532", .devs = { { "CSC0000" }, { "CSC0010" }, { "PnPb006" }, { "CSC0004" } } }, /* Tropez+ */ { .id = "CSC7632", .devs = { { "CSC0000" }, { "CSC0010" }, { "PnPb006" }, { "CSC0004" } } }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp_card, snd_wavefront_pnpids); static int snd_wavefront_pnp (int dev, snd_wavefront_card_t *acard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; /* Check for each logical device. */ /* CS4232 chip (aka "windows sound system") is logical device 0 */ acard->wss = pnp_request_card_device(card, id->devs[0].id, NULL); if (acard->wss == NULL) return -EBUSY; /* there is a game port at logical device 1, but we ignore it completely */ /* the control interface is logical device 2, but we ignore it completely. in fact, nobody even seems to know what it does. */ /* Only configure the CS4232 MIDI interface if its been specifically requested. It is logical device 3. */ if (use_cs4232_midi[dev]) { acard->mpu = pnp_request_card_device(card, id->devs[2].id, NULL); if (acard->mpu == NULL) return -EBUSY; } /* The ICS2115 synth is logical device 4 */ acard->synth = pnp_request_card_device(card, id->devs[3].id, NULL); if (acard->synth == NULL) return -EBUSY; /* PCM/FM initialization */ pdev = acard->wss; /* An interesting note from the Tropez+ FAQ: Q. [Ports] Why is the base address of the WSS I/O ports off by 4? A. WSS I/O requires a block of 8 I/O addresses ("ports"). 
Of these, the first 4 are used to identify and configure the board. With the advent of PnP, these first 4 addresses have become obsolete, and software applications only use the last 4 addresses to control the codec chip. Therefore, the base address setting "skips past" the 4 unused addresses. */ err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR "PnP WSS pnp configure failure\n"); return err; } cs4232_pcm_port[dev] = pnp_port_start(pdev, 0); fm_port[dev] = pnp_port_start(pdev, 1); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1); cs4232_pcm_irq[dev] = pnp_irq(pdev, 0); /* Synth initialization */ pdev = acard->synth; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR "PnP ICS2115 pnp configure failure\n"); return err; } ics2115_port[dev] = pnp_port_start(pdev, 0); ics2115_irq[dev] = pnp_irq(pdev, 0); /* CS4232 MPU initialization. Configure this only if explicitly requested, since its physically inaccessible and consumes another IRQ. */ if (use_cs4232_midi[dev]) { pdev = acard->mpu; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR "PnP MPU401 pnp configure failure\n"); cs4232_mpu_port[dev] = SNDRV_AUTO_PORT; } else { cs4232_mpu_port[dev] = pnp_port_start(pdev, 0); cs4232_mpu_irq[dev] = pnp_irq(pdev, 0); } snd_printk (KERN_INFO "CS4232 MPU: port=0x%lx, irq=%i\n", cs4232_mpu_port[dev], cs4232_mpu_irq[dev]); } snd_printdd ("CS4232: pcm port=0x%lx, fm port=0x%lx, dma1=%i, dma2=%i, irq=%i\nICS2115: port=0x%lx, irq=%i\n", cs4232_pcm_port[dev], fm_port[dev], dma1[dev], dma2[dev], cs4232_pcm_irq[dev], ics2115_port[dev], ics2115_irq[dev]); return 0; } #endif /* CONFIG_PNP */ static irqreturn_t snd_wavefront_ics2115_interrupt(int irq, void *dev_id) { snd_wavefront_card_t *acard; acard = (snd_wavefront_card_t *) dev_id; if (acard == NULL) return IRQ_NONE; if (acard->wavefront.interrupts_are_midi) { snd_wavefront_midi_interrupt (acard); } else { snd_wavefront_internal_interrupt (acard); } return IRQ_HANDLED; } static struct 
snd_hwdep *snd_wavefront_new_synth(struct snd_card *card, int hw_dev, snd_wavefront_card_t *acard) { struct snd_hwdep *wavefront_synth; if (snd_wavefront_detect (acard) < 0) { return NULL; } if (snd_wavefront_start (&acard->wavefront) < 0) { return NULL; } if (snd_hwdep_new(card, "WaveFront", hw_dev, &wavefront_synth) < 0) return NULL; strcpy (wavefront_synth->name, "WaveFront (ICS2115) wavetable synthesizer"); wavefront_synth->ops.open = snd_wavefront_synth_open; wavefront_synth->ops.release = snd_wavefront_synth_release; wavefront_synth->ops.ioctl = snd_wavefront_synth_ioctl; return wavefront_synth; } static struct snd_hwdep *snd_wavefront_new_fx(struct snd_card *card, int hw_dev, snd_wavefront_card_t *acard, unsigned long port) { struct snd_hwdep *fx_processor; if (snd_wavefront_fx_start (&acard->wavefront)) { snd_printk (KERN_ERR "cannot initialize YSS225 FX processor"); return NULL; } if (snd_hwdep_new (card, "YSS225", hw_dev, &fx_processor) < 0) return NULL; sprintf (fx_processor->name, "YSS225 FX Processor at 0x%lx", port); fx_processor->ops.open = snd_wavefront_fx_open; fx_processor->ops.release = snd_wavefront_fx_release; fx_processor->ops.ioctl = snd_wavefront_fx_ioctl; return fx_processor; } static snd_wavefront_mpu_id internal_id = internal_mpu; static snd_wavefront_mpu_id external_id = external_mpu; static struct snd_rawmidi *snd_wavefront_new_midi(struct snd_card *card, int midi_dev, snd_wavefront_card_t *acard, unsigned long port, snd_wavefront_mpu_id mpu) { struct snd_rawmidi *rmidi; static int first = 1; if (first) { first = 0; acard->wavefront.midi.base = port; if (snd_wavefront_midi_start (acard)) { snd_printk (KERN_ERR "cannot initialize MIDI interface\n"); return NULL; } } if (snd_rawmidi_new (card, "WaveFront MIDI", midi_dev, 1, 1, &rmidi) < 0) return NULL; if (mpu == internal_mpu) { strcpy(rmidi->name, "WaveFront MIDI (Internal)"); rmidi->private_data = &internal_id; } else { strcpy(rmidi->name, "WaveFront MIDI (External)"); 
rmidi->private_data = &external_id; } snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_wavefront_midi_output); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_wavefront_midi_input); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; return rmidi; } static void snd_wavefront_free(struct snd_card *card) { snd_wavefront_card_t *acard = (snd_wavefront_card_t *)card->private_data; if (acard) { release_and_free_resource(acard->wavefront.res_base); if (acard->wavefront.irq > 0) free_irq(acard->wavefront.irq, (void *)acard); } } static int snd_wavefront_card_new(struct device *pdev, int dev, struct snd_card **cardp) { struct snd_card *card; snd_wavefront_card_t *acard; int err; err = snd_card_new(pdev, index[dev], id[dev], THIS_MODULE, sizeof(snd_wavefront_card_t), &card); if (err < 0) return err; acard = card->private_data; acard->wavefront.irq = -1; spin_lock_init(&acard->wavefront.irq_lock); init_waitqueue_head(&acard->wavefront.interrupt_sleeper); spin_lock_init(&acard->wavefront.midi.open); spin_lock_init(&acard->wavefront.midi.virtual); acard->wavefront.card = card; card->private_free = snd_wavefront_free; *cardp = card; return 0; } static int snd_wavefront_probe (struct snd_card *card, int dev) { snd_wavefront_card_t *acard = card->private_data; struct snd_wss *chip; struct snd_hwdep *wavefront_synth; struct snd_rawmidi *ics2115_internal_rmidi = NULL; struct snd_rawmidi *ics2115_external_rmidi = NULL; struct snd_hwdep *fx_processor; int hw_dev = 0, midi_dev = 0, err; /* --------- PCM --------------- */ err = snd_wss_create(card, cs4232_pcm_port[dev], -1, cs4232_pcm_irq[dev], dma1[dev], dma2[dev], WSS_HW_DETECT, 0, &chip); if (err < 0) { snd_printk(KERN_ERR "can't allocate WSS device\n"); return err; } err = snd_wss_pcm(chip, 0); if (err < 0) return err; err = snd_wss_timer(chip, 0); if (err < 0) return err; /* ---------- OPL3 synth --------- */ if (fm_port[dev] > 0 && fm_port[dev] != 
SNDRV_AUTO_PORT) { struct snd_opl3 *opl3; err = snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_OPL3_CS, 0, &opl3); if (err < 0) { snd_printk (KERN_ERR "can't allocate or detect OPL3 synth\n"); return err; } err = snd_opl3_hwdep_new(opl3, hw_dev, 1, NULL); if (err < 0) return err; hw_dev++; } /* ------- ICS2115 Wavetable synth ------- */ acard->wavefront.res_base = request_region(ics2115_port[dev], 16, "ICS2115"); if (acard->wavefront.res_base == NULL) { snd_printk(KERN_ERR "unable to grab ICS2115 i/o region 0x%lx-0x%lx\n", ics2115_port[dev], ics2115_port[dev] + 16 - 1); return -EBUSY; } if (request_irq(ics2115_irq[dev], snd_wavefront_ics2115_interrupt, 0, "ICS2115", acard)) { snd_printk(KERN_ERR "unable to use ICS2115 IRQ %d\n", ics2115_irq[dev]); return -EBUSY; } acard->wavefront.irq = ics2115_irq[dev]; acard->wavefront.base = ics2115_port[dev]; wavefront_synth = snd_wavefront_new_synth(card, hw_dev, acard); if (wavefront_synth == NULL) { snd_printk (KERN_ERR "can't create WaveFront synth device\n"); return -ENOMEM; } strcpy (wavefront_synth->name, "ICS2115 Wavetable MIDI Synthesizer"); wavefront_synth->iface = SNDRV_HWDEP_IFACE_ICS2115; hw_dev++; /* --------- Mixer ------------ */ err = snd_wss_mixer(chip); if (err < 0) { snd_printk (KERN_ERR "can't allocate mixer device\n"); return err; } /* -------- CS4232 MPU-401 interface -------- */ if (cs4232_mpu_port[dev] > 0 && cs4232_mpu_port[dev] != SNDRV_AUTO_PORT) { err = snd_mpu401_uart_new(card, midi_dev, MPU401_HW_CS4232, cs4232_mpu_port[dev], 0, cs4232_mpu_irq[dev], NULL); if (err < 0) { snd_printk (KERN_ERR "can't allocate CS4232 MPU-401 device\n"); return err; } midi_dev++; } /* ------ ICS2115 internal MIDI ------------ */ if (ics2115_port[dev] > 0 && ics2115_port[dev] != SNDRV_AUTO_PORT) { ics2115_internal_rmidi = snd_wavefront_new_midi (card, midi_dev, acard, ics2115_port[dev], internal_mpu); if (ics2115_internal_rmidi == NULL) { snd_printk (KERN_ERR "can't setup ICS2115 internal MIDI 
device\n"); return -ENOMEM; } midi_dev++; } /* ------ ICS2115 external MIDI ------------ */ if (ics2115_port[dev] > 0 && ics2115_port[dev] != SNDRV_AUTO_PORT) { ics2115_external_rmidi = snd_wavefront_new_midi (card, midi_dev, acard, ics2115_port[dev], external_mpu); if (ics2115_external_rmidi == NULL) { snd_printk (KERN_ERR "can't setup ICS2115 external MIDI device\n"); return -ENOMEM; } midi_dev++; } /* FX processor for Tropez+ */ if (acard->wavefront.has_fx) { fx_processor = snd_wavefront_new_fx (card, hw_dev, acard, ics2115_port[dev]); if (fx_processor == NULL) { snd_printk (KERN_ERR "can't setup FX device\n"); return -ENOMEM; } hw_dev++; strcpy(card->driver, "Tropez+"); strcpy(card->shortname, "Turtle Beach Tropez+"); } else { /* Need a way to distinguish between Maui and Tropez */ strcpy(card->driver, "WaveFront"); strcpy(card->shortname, "Turtle Beach WaveFront"); } /* ----- Register the card --------- */ /* Not safe to include "Turtle Beach" in longname, due to length restrictions */ sprintf(card->longname, "%s PCM 0x%lx irq %d dma %d", card->driver, chip->port, cs4232_pcm_irq[dev], dma1[dev]); if (dma2[dev] >= 0 && dma2[dev] < 8) sprintf(card->longname + strlen(card->longname), "&%d", dma2[dev]); if (cs4232_mpu_port[dev] > 0 && cs4232_mpu_port[dev] != SNDRV_AUTO_PORT) { sprintf (card->longname + strlen (card->longname), " MPU-401 0x%lx irq %d", cs4232_mpu_port[dev], cs4232_mpu_irq[dev]); } sprintf (card->longname + strlen (card->longname), " SYNTH 0x%lx irq %d", ics2115_port[dev], ics2115_irq[dev]); return snd_card_register(card); } static int snd_wavefront_isa_match(struct device *pdev, unsigned int dev) { if (!enable[dev]) return 0; #ifdef CONFIG_PNP if (isapnp[dev]) return 0; #endif if (cs4232_pcm_port[dev] == SNDRV_AUTO_PORT) { snd_printk(KERN_ERR "specify CS4232 port\n"); return 0; } if (ics2115_port[dev] == SNDRV_AUTO_PORT) { snd_printk(KERN_ERR "specify ICS2115 port\n"); return 0; } return 1; } static int snd_wavefront_isa_probe(struct device *pdev, 
unsigned int dev) { struct snd_card *card; int err; err = snd_wavefront_card_new(pdev, dev, &card); if (err < 0) return err; if ((err = snd_wavefront_probe(card, dev)) < 0) { snd_card_free(card); return err; } dev_set_drvdata(pdev, card); return 0; } static int snd_wavefront_isa_remove(struct device *devptr, unsigned int dev) { snd_card_free(dev_get_drvdata(devptr)); return 0; } #define DEV_NAME "wavefront" static struct isa_driver snd_wavefront_driver = { .match = snd_wavefront_isa_match, .probe = snd_wavefront_isa_probe, .remove = snd_wavefront_isa_remove, /* FIXME: suspend, resume */ .driver = { .name = DEV_NAME }, }; #ifdef CONFIG_PNP static int snd_wavefront_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int dev; struct snd_card *card; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev >= SNDRV_CARDS) return -ENODEV; res = snd_wavefront_card_new(&pcard->card->dev, dev, &card); if (res < 0) return res; if (snd_wavefront_pnp (dev, card->private_data, pcard, pid) < 0) { if (cs4232_pcm_port[dev] == SNDRV_AUTO_PORT) { snd_printk (KERN_ERR "isapnp detection failed\n"); snd_card_free (card); return -ENODEV; } } if ((res = snd_wavefront_probe(card, dev)) < 0) return res; pnp_set_card_drvdata(pcard, card); dev++; return 0; } static void snd_wavefront_pnp_remove(struct pnp_card_link *pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static struct pnp_card_driver wavefront_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "wavefront", .id_table = snd_wavefront_pnpids, .probe = snd_wavefront_pnp_detect, .remove = snd_wavefront_pnp_remove, /* FIXME: suspend,resume */ }; #endif /* CONFIG_PNP */ static int __init alsa_card_wavefront_init(void) { int err; err = isa_register_driver(&snd_wavefront_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_card_driver(&wavefront_pnpc_driver); if (!err) pnp_registered = 1; 
if (isa_registered) err = 0; #endif return err; } static void __exit alsa_card_wavefront_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_card_driver(&wavefront_pnpc_driver); if (isa_registered) #endif isa_unregister_driver(&snd_wavefront_driver); } module_init(alsa_card_wavefront_init) module_exit(alsa_card_wavefront_exit)
gpl-2.0
Sublime-Development/kernel_flounder
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
2245
5446
/* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <panto@intracom.gr> * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/platform_device.h> #include <linux/mdio-bitbang.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include "fs_enet.h" struct bb_info { struct mdiobb_ctrl ctrl; __be32 __iomem *dir; __be32 __iomem *dat; u32 mdio_msk; u32 mdc_msk; }; /* FIXME: If any other users of GPIO crop up, then these will have to * have some sort of global synchronization to avoid races with other * pins on the same port. The ideal solution would probably be to * bind the ports to a GPIO driver, and have this be a client of it. */ static inline void bb_set(u32 __iomem *p, u32 m) { out_be32(p, in_be32(p) | m); } static inline void bb_clr(u32 __iomem *p, u32 m) { out_be32(p, in_be32(p) & ~m); } static inline int bb_read(u32 __iomem *p, u32 m) { return (in_be32(p) & m) != 0; } static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); if (dir) bb_set(bitbang->dir, bitbang->mdio_msk); else bb_clr(bitbang->dir, bitbang->mdio_msk); /* Read back to flush the write. 
*/ in_be32(bitbang->dir); } static inline int mdio_read(struct mdiobb_ctrl *ctrl) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); return bb_read(bitbang->dat, bitbang->mdio_msk); } static inline void mdio(struct mdiobb_ctrl *ctrl, int what) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); if (what) bb_set(bitbang->dat, bitbang->mdio_msk); else bb_clr(bitbang->dat, bitbang->mdio_msk); /* Read back to flush the write. */ in_be32(bitbang->dat); } static inline void mdc(struct mdiobb_ctrl *ctrl, int what) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); if (what) bb_set(bitbang->dat, bitbang->mdc_msk); else bb_clr(bitbang->dat, bitbang->mdc_msk); /* Read back to flush the write. */ in_be32(bitbang->dat); } static struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = mdc, .set_mdio_dir = mdio_dir, .set_mdio_data = mdio, .get_mdio_data = mdio_read, }; static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np) { struct resource res; const u32 *data; int mdio_pin, mdc_pin, len; struct bb_info *bitbang = bus->priv; int ret = of_address_to_resource(np, 0, &res); if (ret) return ret; if (resource_size(&res) <= 13) return -ENODEV; /* This should really encode the pin number as well, but all * we get is an int, and the odds of multiple bitbang mdio buses * is low enough that it's not worth going too crazy. 
*/ snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); data = of_get_property(np, "fsl,mdio-pin", &len); if (!data || len != 4) return -ENODEV; mdio_pin = *data; data = of_get_property(np, "fsl,mdc-pin", &len); if (!data || len != 4) return -ENODEV; mdc_pin = *data; bitbang->dir = ioremap(res.start, resource_size(&res)); if (!bitbang->dir) return -ENOMEM; bitbang->dat = bitbang->dir + 4; bitbang->mdio_msk = 1 << (31 - mdio_pin); bitbang->mdc_msk = 1 << (31 - mdc_pin); return 0; } static int fs_enet_mdio_probe(struct platform_device *ofdev) { struct mii_bus *new_bus; struct bb_info *bitbang; int ret = -ENOMEM; bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); if (!bitbang) goto out; bitbang->ctrl.ops = &bb_ops; new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) goto out_free_priv; new_bus->name = "CPM2 Bitbanged MII", ret = fs_mii_bitbang_init(new_bus, ofdev->dev.of_node); if (ret) goto out_free_bus; new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!new_bus->irq) { ret = -ENOMEM; goto out_unmap_regs; } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); if (ret) goto out_free_irqs; return 0; out_free_irqs: dev_set_drvdata(&ofdev->dev, NULL); kfree(new_bus->irq); out_unmap_regs: iounmap(bitbang->dir); out_free_bus: free_mdio_bitbang(new_bus); out_free_priv: kfree(bitbang); out: return ret; } static int fs_enet_mdio_remove(struct platform_device *ofdev) { struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); struct bb_info *bitbang = bus->priv; mdiobus_unregister(bus); dev_set_drvdata(&ofdev->dev, NULL); kfree(bus->irq); free_mdio_bitbang(bus); iounmap(bitbang->dir); kfree(bitbang); return 0; } static struct of_device_id fs_enet_mdio_bb_match[] = { { .compatible = "fsl,cpm2-mdio-bitbang", }, {}, }; MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); static struct platform_driver fs_enet_bb_mdio_driver = { .driver = { .name = "fsl-bb-mdio", 
.owner = THIS_MODULE, .of_match_table = fs_enet_mdio_bb_match, }, .probe = fs_enet_mdio_probe, .remove = fs_enet_mdio_remove, }; module_platform_driver(fs_enet_bb_mdio_driver);
gpl-2.0
playfulgod/kernel_lge_dory
drivers/staging/sm7xxfb/sm7xxfb.c
2245
24008
/* * Silicon Motion SM7XX frame buffer device * * Copyright (C) 2006 Silicon Motion Technology Corp. * Authors: Ge Wang, gewang@siliconmotion.com * Boyod boyod.yang@siliconmotion.com.cn * * Copyright (C) 2009 Lemote, Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * Copyright (C) 2011 Igalia, S.L. * Author: Javier M. Mellid <jmunhoz@igalia.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips */ #include <linux/io.h> #include <linux/fb.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/console.h> #include <linux/screen_info.h> #ifdef CONFIG_PM #include <linux/pm.h> #endif #include "sm7xx.h" /* * Private structure */ struct smtcfb_info { struct pci_dev *pdev; struct fb_info fb; u16 chip_id; u8 chip_rev_id; void __iomem *lfb; /* linear frame buffer */ void __iomem *dp_regs; /* drawing processor control regs */ void __iomem *vp_regs; /* video processor control regs */ void __iomem *cp_regs; /* capture processor control regs */ void __iomem *mmio; /* memory map IO port */ u_int width; u_int height; u_int hz; u32 colreg[17]; }; void __iomem *smtc_RegBaseAddress; /* Memory Map IO starting address */ static struct fb_var_screeninfo smtcfb_var = { .xres = 1024, .yres = 600, .xres_virtual = 1024, .yres_virtual = 600, .bits_per_pixel = 16, .red = {16, 8, 0}, .green = {8, 8, 0}, .blue = {0, 8, 0}, .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .vmode = FB_VMODE_NONINTERLACED, .nonstd = 0, .accel_flags = FB_ACCELF_TEXT, }; static struct fb_fix_screeninfo smtcfb_fix = { .id = "smXXXfb", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .line_length = 800 * 3, .accel = FB_ACCEL_SMI_LYNX, .type_aux = 0, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, }; struct vesa_mode { 
char index[6]; u16 lfb_width; u16 lfb_height; u16 lfb_depth; }; static struct vesa_mode vesa_mode_table[] = { {"0x301", 640, 480, 8}, {"0x303", 800, 600, 8}, {"0x305", 1024, 768, 8}, {"0x307", 1280, 1024, 8}, {"0x311", 640, 480, 16}, {"0x314", 800, 600, 16}, {"0x317", 1024, 768, 16}, {"0x31A", 1280, 1024, 16}, {"0x312", 640, 480, 24}, {"0x315", 800, 600, 24}, {"0x318", 1024, 768, 24}, {"0x31B", 1280, 1024, 24}, }; struct screen_info smtc_scr_info; /* process command line options, get vga parameter */ static int __init sm7xx_vga_setup(char *options) { int i; if (!options || !*options) return -EINVAL; smtc_scr_info.lfb_width = 0; smtc_scr_info.lfb_height = 0; smtc_scr_info.lfb_depth = 0; pr_debug("sm7xx_vga_setup = %s\n", options); for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) { if (strstr(options, vesa_mode_table[i].index)) { smtc_scr_info.lfb_width = vesa_mode_table[i].lfb_width; smtc_scr_info.lfb_height = vesa_mode_table[i].lfb_height; smtc_scr_info.lfb_depth = vesa_mode_table[i].lfb_depth; return 0; } } return -1; } __setup("vga=", sm7xx_vga_setup); static void sm712_setpalette(int regno, unsigned red, unsigned green, unsigned blue, struct fb_info *info) { /* set bit 5:4 = 01 (write LCD RAM only) */ smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10); smtc_mmiowb(regno, dac_reg); smtc_mmiowb(red >> 10, dac_val); smtc_mmiowb(green >> 10, dac_val); smtc_mmiowb(blue >> 10, dac_val); } /* chan_to_field * * convert a colour value into a field position * * from pxafb.c */ static inline unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { chan &= 0xffff; chan >>= 16 - bf->length; return chan << bf->offset; } static int smtc_blank(int blank_mode, struct fb_info *info) { /* clear DPMS setting */ switch (blank_mode) { case FB_BLANK_UNBLANK: /* Screen On: HSync: On, VSync : On */ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20))); smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77)); smtc_seqw(0x22, (smtc_seqr(0x22) & 
(~0x30))); smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03)); break; case FB_BLANK_NORMAL: /* Screen Off: HSync: On, VSync : On Soft blank */ smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20))); smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30))); smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0))); smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01)); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); break; case FB_BLANK_VSYNC_SUSPEND: /* Screen On: HSync: On, VSync : Off */ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x6a, 0x0c); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20)); smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20)); smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); break; case FB_BLANK_HSYNC_SUSPEND: /* Screen On: HSync: Off, VSync : On */ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x6a, 0x0c); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10)); smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80)); break; case FB_BLANK_POWERDOWN: /* Screen On: HSync: Off, VSync : Off */ smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20)); smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0))); smtc_seqw(0x6a, 0x0c); smtc_seqw(0x6b, 0x02); smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88)); smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30)); smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8)); smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01))); smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00)); smtc_seqw(0x34, 
(smtc_seqr(0x34) | 0x80)); break; default: return -EINVAL; } return 0; } static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned trans, struct fb_info *info) { struct smtcfb_info *sfb; u32 val; sfb = info->par; if (regno > 255) return 1; switch (sfb->fb.fix.visual) { case FB_VISUAL_DIRECTCOLOR: case FB_VISUAL_TRUECOLOR: /* * 16/32 bit true-colour, use pseudo-palette for 16 base color */ if (regno < 16) { if (sfb->fb.var.bits_per_pixel == 16) { u32 *pal = sfb->fb.pseudo_palette; val = chan_to_field(red, &sfb->fb.var.red); val |= chan_to_field(green, \ &sfb->fb.var.green); val |= chan_to_field(blue, &sfb->fb.var.blue); #ifdef __BIG_ENDIAN pal[regno] = ((red & 0xf800) >> 8) | ((green & 0xe000) >> 13) | ((green & 0x1c00) << 3) | ((blue & 0xf800) >> 3); #else pal[regno] = val; #endif } else { u32 *pal = sfb->fb.pseudo_palette; val = chan_to_field(red, &sfb->fb.var.red); val |= chan_to_field(green, \ &sfb->fb.var.green); val |= chan_to_field(blue, &sfb->fb.var.blue); #ifdef __BIG_ENDIAN val = (val & 0xff00ff00 >> 8) | (val & 0x00ff00ff << 8); #endif pal[regno] = val; } } break; case FB_VISUAL_PSEUDOCOLOR: /* color depth 8 bit */ sm712_setpalette(regno, red, green, blue, info); break; default: return 1; /* unknown type */ } return 0; } #ifdef __BIG_ENDIAN static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; u32 *buffer, *dst; u32 __iomem *src; int c, i, cnt = 0, err = 0; unsigned long total_size; if (!info || !info->screen_base) return -ENODEV; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; total_size = info->screen_size; if (total_size == 0) total_size = info->fix.smem_len; if (p >= total_size) return 0; if (count >= total_size) count = total_size; if (count + p > total_size) count = total_size - p; buffer = kmalloc((count > PAGE_SIZE) ? 
PAGE_SIZE : count, GFP_KERNEL); if (!buffer) return -ENOMEM; src = (u32 __iomem *) (info->screen_base + p); if (info->fbops->fb_sync) info->fbops->fb_sync(info); while (count) { c = (count > PAGE_SIZE) ? PAGE_SIZE : count; dst = buffer; for (i = c >> 2; i--;) { *dst = fb_readl(src++); *dst = (*dst & 0xff00ff00 >> 8) | (*dst & 0x00ff00ff << 8); dst++; } if (c & 3) { u8 *dst8 = (u8 *) dst; u8 __iomem *src8 = (u8 __iomem *) src; for (i = c & 3; i--;) { if (i & 1) { *dst8++ = fb_readb(++src8); } else { *dst8++ = fb_readb(--src8); src8 += 2; } } src = (u32 __iomem *) src8; } if (copy_to_user(buf, buffer, c)) { err = -EFAULT; break; } *ppos += c; buf += c; cnt += c; count -= c; } kfree(buffer); return (err) ? err : cnt; } static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; u32 *buffer, *src; u32 __iomem *dst; int c, i, cnt = 0, err = 0; unsigned long total_size; if (!info || !info->screen_base) return -ENODEV; if (info->state != FBINFO_STATE_RUNNING) return -EPERM; total_size = info->screen_size; if (total_size == 0) total_size = info->fix.smem_len; if (p > total_size) return -EFBIG; if (count > total_size) { err = -EFBIG; count = total_size; } if (count + p > total_size) { if (!err) err = -ENOSPC; count = total_size - p; } buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL); if (!buffer) return -ENOMEM; dst = (u32 __iomem *) (info->screen_base + p); if (info->fbops->fb_sync) info->fbops->fb_sync(info); while (count) { c = (count > PAGE_SIZE) ? 
PAGE_SIZE : count; src = buffer; if (copy_from_user(src, buf, c)) { err = -EFAULT; break; } for (i = c >> 2; i--;) { fb_writel((*src & 0xff00ff00 >> 8) | (*src & 0x00ff00ff << 8), dst++); src++; } if (c & 3) { u8 *src8 = (u8 *) src; u8 __iomem *dst8 = (u8 __iomem *) dst; for (i = c & 3; i--;) { if (i & 1) { fb_writeb(*src8++, ++dst8); } else { fb_writeb(*src8++, --dst8); dst8 += 2; } } dst = (u32 __iomem *) dst8; } *ppos += c; buf += c; cnt += c; count -= c; } kfree(buffer); return (cnt) ? cnt : err; } #endif /* ! __BIG_ENDIAN */ static void sm7xx_set_timing(struct smtcfb_info *sfb) { int i = 0, j = 0; u32 m_nScreenStride; dev_dbg(&sfb->pdev->dev, "sfb->width=%d sfb->height=%d " "sfb->fb.var.bits_per_pixel=%d sfb->hz=%d\n", sfb->width, sfb->height, sfb->fb.var.bits_per_pixel, sfb->hz); for (j = 0; j < numVGAModes; j++) { if (VGAMode[j].mmSizeX == sfb->width && VGAMode[j].mmSizeY == sfb->height && VGAMode[j].bpp == sfb->fb.var.bits_per_pixel && VGAMode[j].hz == sfb->hz) { dev_dbg(&sfb->pdev->dev, "VGAMode[j].mmSizeX=%d VGAMode[j].mmSizeY=%d " "VGAMode[j].bpp=%d VGAMode[j].hz=%d\n", VGAMode[j].mmSizeX, VGAMode[j].mmSizeY, VGAMode[j].bpp, VGAMode[j].hz); dev_dbg(&sfb->pdev->dev, "VGAMode index=%d\n", j); smtc_mmiowb(0x0, 0x3c6); smtc_seqw(0, 0x1); smtc_mmiowb(VGAMode[j].Init_MISC, 0x3c2); /* init SEQ register SR00 - SR04 */ for (i = 0; i < SIZE_SR00_SR04; i++) smtc_seqw(i, VGAMode[j].Init_SR00_SR04[i]); /* init SEQ register SR10 - SR24 */ for (i = 0; i < SIZE_SR10_SR24; i++) smtc_seqw(i + 0x10, VGAMode[j].Init_SR10_SR24[i]); /* init SEQ register SR30 - SR75 */ for (i = 0; i < SIZE_SR30_SR75; i++) if (((i + 0x30) != 0x62) \ && ((i + 0x30) != 0x6a) \ && ((i + 0x30) != 0x6b)) smtc_seqw(i + 0x30, VGAMode[j].Init_SR30_SR75[i]); /* init SEQ register SR80 - SR93 */ for (i = 0; i < SIZE_SR80_SR93; i++) smtc_seqw(i + 0x80, VGAMode[j].Init_SR80_SR93[i]); /* init SEQ register SRA0 - SRAF */ for (i = 0; i < SIZE_SRA0_SRAF; i++) smtc_seqw(i + 0xa0, VGAMode[j].Init_SRA0_SRAF[i]); 
/* init Graphic register GR00 - GR08 */ for (i = 0; i < SIZE_GR00_GR08; i++) smtc_grphw(i, VGAMode[j].Init_GR00_GR08[i]); /* init Attribute register AR00 - AR14 */ for (i = 0; i < SIZE_AR00_AR14; i++) smtc_attrw(i, VGAMode[j].Init_AR00_AR14[i]); /* init CRTC register CR00 - CR18 */ for (i = 0; i < SIZE_CR00_CR18; i++) smtc_crtcw(i, VGAMode[j].Init_CR00_CR18[i]); /* init CRTC register CR30 - CR4D */ for (i = 0; i < SIZE_CR30_CR4D; i++) smtc_crtcw(i + 0x30, VGAMode[j].Init_CR30_CR4D[i]); /* init CRTC register CR90 - CRA7 */ for (i = 0; i < SIZE_CR90_CRA7; i++) smtc_crtcw(i + 0x90, VGAMode[j].Init_CR90_CRA7[i]); } } smtc_mmiowb(0x67, 0x3c2); /* set VPR registers */ writel(0x0, sfb->vp_regs + 0x0C); writel(0x0, sfb->vp_regs + 0x40); /* set data width */ m_nScreenStride = (sfb->width * sfb->fb.var.bits_per_pixel) / 64; switch (sfb->fb.var.bits_per_pixel) { case 8: writel(0x0, sfb->vp_regs + 0x0); break; case 16: writel(0x00020000, sfb->vp_regs + 0x0); break; case 24: writel(0x00040000, sfb->vp_regs + 0x0); break; case 32: writel(0x00030000, sfb->vp_regs + 0x0); break; } writel((u32) (((m_nScreenStride + 2) << 16) | m_nScreenStride), sfb->vp_regs + 0x10); } static void smtc_set_timing(struct smtcfb_info *sfb) { switch (sfb->chip_id) { case 0x710: case 0x712: case 0x720: sm7xx_set_timing(sfb); break; } } void smtcfb_setmode(struct smtcfb_info *sfb) { switch (sfb->fb.var.bits_per_pixel) { case 32: sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres * 4; sfb->fb.var.red.length = 8; sfb->fb.var.green.length = 8; sfb->fb.var.blue.length = 8; sfb->fb.var.red.offset = 16; sfb->fb.var.green.offset = 8; sfb->fb.var.blue.offset = 0; break; case 24: sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres * 3; sfb->fb.var.red.length = 8; sfb->fb.var.green.length = 8; sfb->fb.var.blue.length = 8; sfb->fb.var.red.offset = 16; sfb->fb.var.green.offset = 8; sfb->fb.var.blue.offset = 0; break; case 8: sfb->fb.fix.visual = 
FB_VISUAL_PSEUDOCOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres; sfb->fb.var.red.length = 3; sfb->fb.var.green.length = 3; sfb->fb.var.blue.length = 2; sfb->fb.var.red.offset = 5; sfb->fb.var.green.offset = 2; sfb->fb.var.blue.offset = 0; break; case 16: default: sfb->fb.fix.visual = FB_VISUAL_TRUECOLOR; sfb->fb.fix.line_length = sfb->fb.var.xres * 2; sfb->fb.var.red.length = 5; sfb->fb.var.green.length = 6; sfb->fb.var.blue.length = 5; sfb->fb.var.red.offset = 11; sfb->fb.var.green.offset = 5; sfb->fb.var.blue.offset = 0; break; } sfb->width = sfb->fb.var.xres; sfb->height = sfb->fb.var.yres; sfb->hz = 60; smtc_set_timing(sfb); } static int smtc_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { /* sanity checks */ if (var->xres_virtual < var->xres) var->xres_virtual = var->xres; if (var->yres_virtual < var->yres) var->yres_virtual = var->yres; /* set valid default bpp */ if ((var->bits_per_pixel != 8) && (var->bits_per_pixel != 16) && (var->bits_per_pixel != 24) && (var->bits_per_pixel != 32)) var->bits_per_pixel = 16; return 0; } static int smtc_set_par(struct fb_info *info) { smtcfb_setmode(info->par); return 0; } static struct fb_ops smtcfb_ops = { .owner = THIS_MODULE, .fb_check_var = smtc_check_var, .fb_set_par = smtc_set_par, .fb_setcolreg = smtc_setcolreg, .fb_blank = smtc_blank, .fb_fillrect = cfb_fillrect, .fb_imageblit = cfb_imageblit, .fb_copyarea = cfb_copyarea, #ifdef __BIG_ENDIAN .fb_read = smtcfb_read, .fb_write = smtcfb_write, #endif }; /* * alloc struct smtcfb_info and assign default values */ static struct smtcfb_info *smtc_alloc_fb_info(struct pci_dev *pdev) { struct smtcfb_info *sfb; sfb = kzalloc(sizeof(*sfb), GFP_KERNEL); if (!sfb) return NULL; sfb->pdev = pdev; sfb->fb.flags = FBINFO_FLAG_DEFAULT; sfb->fb.fbops = &smtcfb_ops; sfb->fb.fix = smtcfb_fix; sfb->fb.var = smtcfb_var; sfb->fb.pseudo_palette = sfb->colreg; sfb->fb.par = sfb; return sfb; } /* * free struct smtcfb_info */ static void smtc_free_fb_info(struct 
smtcfb_info *sfb) { kfree(sfb); } /* * Unmap in the memory mapped IO registers */ static void smtc_unmap_mmio(struct smtcfb_info *sfb) { if (sfb && smtc_RegBaseAddress) smtc_RegBaseAddress = NULL; } /* * Map in the screen memory */ static int smtc_map_smem(struct smtcfb_info *sfb, struct pci_dev *pdev, u_long smem_len) { sfb->fb.fix.smem_start = pci_resource_start(pdev, 0); #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 32) sfb->fb.fix.smem_start += 0x800000; #endif sfb->fb.fix.smem_len = smem_len; sfb->fb.screen_base = sfb->lfb; if (!sfb->fb.screen_base) { dev_err(&pdev->dev, "%s: unable to map screen memory\n", sfb->fb.fix.id); return -ENOMEM; } return 0; } /* * Unmap in the screen memory * */ static void smtc_unmap_smem(struct smtcfb_info *sfb) { if (sfb && sfb->fb.screen_base) { iounmap(sfb->fb.screen_base); sfb->fb.screen_base = NULL; } } /* * We need to wake up the device and make sure its in linear memory mode. */ static inline void sm7xx_init_hw(void) { outb_p(0x18, 0x3c4); outb_p(0x11, 0x3c5); } static int smtcfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct smtcfb_info *sfb; u_long smem_size = 0x00800000; /* default 8MB */ int err; unsigned long mmio_base; dev_info(&pdev->dev, "Silicon Motion display driver."); err = pci_enable_device(pdev); /* enable SMTC chip */ if (err) return err; sprintf(smtcfb_fix.id, "sm%Xfb", ent->device); sfb = smtc_alloc_fb_info(pdev); if (!sfb) { err = -ENOMEM; goto failed_free; } sfb->chip_id = ent->device; pci_set_drvdata(pdev, sfb); sm7xx_init_hw(); /* get mode parameter from smtc_scr_info */ if (smtc_scr_info.lfb_width != 0) { sfb->fb.var.xres = smtc_scr_info.lfb_width; sfb->fb.var.yres = smtc_scr_info.lfb_height; sfb->fb.var.bits_per_pixel = smtc_scr_info.lfb_depth; } else { /* default resolution 1024x600 16bit mode */ sfb->fb.var.xres = SCREEN_X_RES; sfb->fb.var.yres = SCREEN_Y_RES; sfb->fb.var.bits_per_pixel = SCREEN_BPP; } #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 24) 
sfb->fb.var.bits_per_pixel = (smtc_scr_info.lfb_depth = 32); #endif /* Map address and memory detection */ mmio_base = pci_resource_start(pdev, 0); pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id); switch (sfb->chip_id) { case 0x710: case 0x712: sfb->fb.fix.mmio_start = mmio_base + 0x00400000; sfb->fb.fix.mmio_len = 0x00400000; smem_size = SM712_VIDEOMEMORYSIZE; #ifdef __BIG_ENDIAN sfb->lfb = ioremap(mmio_base, 0x00c00000); #else sfb->lfb = ioremap(mmio_base, 0x00800000); #endif sfb->mmio = (smtc_RegBaseAddress = sfb->lfb + 0x00700000); sfb->dp_regs = sfb->lfb + 0x00408000; sfb->vp_regs = sfb->lfb + 0x0040c000; #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 32) { sfb->lfb += 0x800000; dev_info(&pdev->dev, "sfb->lfb=%p", sfb->lfb); } #endif if (!smtc_RegBaseAddress) { dev_err(&pdev->dev, "%s: unable to map memory mapped IO!", sfb->fb.fix.id); err = -ENOMEM; goto failed_fb; } /* set MCLK = 14.31818 * (0x16 / 0x2) */ smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); smtc_seqw(0x62, 0x3e); /* enable PCI burst */ smtc_seqw(0x17, 0x20); /* enable word swap */ #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 32) smtc_seqw(0x17, 0x30); #endif break; case 0x720: sfb->fb.fix.mmio_start = mmio_base; sfb->fb.fix.mmio_len = 0x00200000; smem_size = SM722_VIDEOMEMORYSIZE; sfb->dp_regs = ioremap(mmio_base, 0x00a00000); sfb->lfb = sfb->dp_regs + 0x00200000; sfb->mmio = (smtc_RegBaseAddress = sfb->dp_regs + 0x000c0000); sfb->vp_regs = sfb->dp_regs + 0x800; smtc_seqw(0x62, 0xff); smtc_seqw(0x6a, 0x0d); smtc_seqw(0x6b, 0x02); break; default: dev_err(&pdev->dev, "No valid Silicon Motion display chip was detected!"); goto failed_fb; } /* can support 32 bpp */ if (15 == sfb->fb.var.bits_per_pixel) sfb->fb.var.bits_per_pixel = 16; sfb->fb.var.xres_virtual = sfb->fb.var.xres; sfb->fb.var.yres_virtual = sfb->fb.var.yres; err = smtc_map_smem(sfb, pdev, smem_size); if (err) goto failed; smtcfb_setmode(sfb); err = register_framebuffer(&sfb->fb); if (err < 0) goto failed; 
dev_info(&pdev->dev, "Silicon Motion SM%X Rev%X primary display mode %dx%d-%d Init Complete.", sfb->chip_id, sfb->chip_rev_id, sfb->fb.var.xres, sfb->fb.var.yres, sfb->fb.var.bits_per_pixel); return 0; failed: dev_err(&pdev->dev, "Silicon Motion, Inc. primary display init fail."); smtc_unmap_smem(sfb); smtc_unmap_mmio(sfb); failed_fb: smtc_free_fb_info(sfb); failed_free: pci_disable_device(pdev); return err; } /* * 0x710 (LynxEM) * 0x712 (LynxEM+) * 0x720 (Lynx3DM, Lynx3DM+) */ static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = { { PCI_DEVICE(0x126f, 0x710), }, { PCI_DEVICE(0x126f, 0x712), }, { PCI_DEVICE(0x126f, 0x720), }, {0,} }; static void smtcfb_pci_remove(struct pci_dev *pdev) { struct smtcfb_info *sfb; sfb = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); smtc_unmap_smem(sfb); smtc_unmap_mmio(sfb); unregister_framebuffer(&sfb->fb); smtc_free_fb_info(sfb); } #ifdef CONFIG_PM static int smtcfb_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct smtcfb_info *sfb; sfb = pci_get_drvdata(pdev); /* set the hw in sleep mode use external clock and self memory refresh * so that we can turn off internal PLLs later on */ smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0)); smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7)); console_lock(); fb_set_suspend(&sfb->fb, 1); console_unlock(); /* additionally turn off all function blocks including internal PLLs */ smtc_seqw(0x21, 0xff); return 0; } static int smtcfb_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct smtcfb_info *sfb; sfb = pci_get_drvdata(pdev); /* reinit hardware */ sm7xx_init_hw(); switch (sfb->chip_id) { case 0x710: case 0x712: /* set MCLK = 14.31818 * (0x16 / 0x2) */ smtc_seqw(0x6a, 0x16); smtc_seqw(0x6b, 0x02); smtc_seqw(0x62, 0x3e); /* enable PCI burst */ smtc_seqw(0x17, 0x20); #ifdef __BIG_ENDIAN if (sfb->fb.var.bits_per_pixel == 32) smtc_seqw(0x17, 0x30); #endif break; case 0x720: smtc_seqw(0x62, 0xff); smtc_seqw(0x6a, 0x0d); smtc_seqw(0x6b, 0x02); 
break; } smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0)); smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb)); smtcfb_setmode(sfb); console_lock(); fb_set_suspend(&sfb->fb, 0); console_unlock(); return 0; } static SIMPLE_DEV_PM_OPS(sm7xx_pm_ops, smtcfb_pci_suspend, smtcfb_pci_resume); #define SM7XX_PM_OPS (&sm7xx_pm_ops) #else /* !CONFIG_PM */ #define SM7XX_PM_OPS NULL #endif /* !CONFIG_PM */ static struct pci_driver smtcfb_driver = { .name = "smtcfb", .id_table = smtcfb_pci_table, .probe = smtcfb_pci_probe, .remove = smtcfb_pci_remove, .driver.pm = SM7XX_PM_OPS, }; module_pci_driver(smtcfb_driver); MODULE_AUTHOR("Siliconmotion "); MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards"); MODULE_LICENSE("GPL");
gpl-2.0
faux123/Galaxy_S5
arch/powerpc/kernel/idle.c
2501
3935
/* * Idle daemon for PowerPC. Idle daemon will handle any action * that needs to be taken when the system becomes idle. * * Originally written by Cort Dougan (cort@cs.nmt.edu). * Subsequent 32-bit hacking by Tom Rini, Armin Kuster, * Paul Mackerras and others. * * iSeries supported added by Mike Corrigan <mikejc@us.ibm.com> * * Additional shared processor, SMT, and firmware support * Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com> * * 32-bit and 64-bit versions merged by Paul Mackerras <paulus@samba.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/sysctl.h> #include <linux/tick.h> #include <asm/processor.h> #include <asm/cputable.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/runlatch.h> #include <asm/smp.h> #ifdef CONFIG_HOTPLUG_CPU #define cpu_should_die() cpu_is_offline(smp_processor_id()) #else #define cpu_should_die() 0 #endif unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(cpuidle_disable); static int __init powersave_off(char *arg) { ppc_md.power_save = NULL; cpuidle_disable = IDLE_POWERSAVE_OFF; return 0; } __setup("powersave=off", powersave_off); /* * The body of the idle task. */ void cpu_idle(void) { if (ppc_md.idle_loop) ppc_md.idle_loop(); /* doesn't return */ set_thread_flag(TIF_POLLING_NRFLAG); while (1) { tick_nohz_idle_enter(); rcu_idle_enter(); while (!need_resched() && !cpu_should_die()) { ppc64_runlatch_off(); if (ppc_md.power_save) { clear_thread_flag(TIF_POLLING_NRFLAG); /* * smp_mb is so clearing of TIF_POLLING_NRFLAG * is ordered w.r.t. need_resched() test. 
*/ smp_mb(); local_irq_disable(); /* Don't trace irqs off for idle */ stop_critical_timings(); /* check again after disabling irqs */ if (!need_resched() && !cpu_should_die()) ppc_md.power_save(); start_critical_timings(); /* Some power_save functions return with * interrupts enabled, some don't. */ if (irqs_disabled()) local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); } else { /* * Go into low thread priority and possibly * low power mode. */ HMT_low(); HMT_very_low(); } } HMT_medium(); ppc64_runlatch_on(); rcu_idle_exit(); tick_nohz_idle_exit(); if (cpu_should_die()) { sched_preempt_enable_no_resched(); cpu_die(); } schedule_preempt_disabled(); } } /* * cpu_idle_wait - Used to ensure that all the CPUs come out of the old * idle loop and start using the new idle loop. * Required while changing idle handler on SMP systems. * Caller must have changed idle handler to the new value before the call. * This window may be larger on shared systems. */ void cpu_idle_wait(void) { int cpu; smp_mb(); /* kick all the CPUs so that they exit out of old idle routine */ get_online_cpus(); for_each_online_cpu(cpu) { if (cpu != smp_processor_id()) smp_send_reschedule(cpu); } put_online_cpus(); } EXPORT_SYMBOL_GPL(cpu_idle_wait); int powersave_nap; #ifdef CONFIG_SYSCTL /* * Register the sysctl to set/clear powersave_nap. */ static ctl_table powersave_nap_ctl_table[]={ { .procname = "powersave-nap", .data = &powersave_nap, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; static ctl_table powersave_nap_sysctl_root[] = { { .procname = "kernel", .mode = 0555, .child = powersave_nap_ctl_table, }, {} }; static int __init register_powersave_nap_sysctl(void) { register_sysctl_table(powersave_nap_sysctl_root); return 0; } __initcall(register_powersave_nap_sysctl); #endif
gpl-2.0
puppies/fl2440
linux-3.10.33/fs/cifs/cifs_spnego.c
2757
4952
/* * fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS * * Copyright (c) 2007 Red Hat, Inc. * Author(s): Jeff Layton (jlayton@redhat.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/list.h> #include <linux/slab.h> #include <linux/string.h> #include <keys/user-type.h> #include <linux/key-type.h> #include <linux/inet.h> #include "cifsglob.h" #include "cifs_spnego.h" #include "cifs_debug.h" /* create a new cifs key */ static int cifs_spnego_key_instantiate(struct key *key, struct key_preparsed_payload *prep) { char *payload; int ret; ret = -ENOMEM; payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL); if (!payload) goto error; /* attach the data */ key->payload.data = payload; ret = 0; error: return ret; } static void cifs_spnego_key_destroy(struct key *key) { kfree(key->payload.data); } /* * keytype for CIFS spnego keys */ struct key_type cifs_spnego_key_type = { .name = "cifs.spnego", .instantiate = cifs_spnego_key_instantiate, .match = user_match, .destroy = cifs_spnego_key_destroy, .describe = user_describe, }; /* length of longest version string e.g. 
strlen("ver=0xFF") */ #define MAX_VER_STR_LEN 8 /* length of longest security mechanism name, eg in future could have * strlen(";sec=ntlmsspi") */ #define MAX_MECH_STR_LEN 13 /* strlen of "host=" */ #define HOST_KEY_LEN 5 /* strlen of ";ip4=" or ";ip6=" */ #define IP_KEY_LEN 5 /* strlen of ";uid=0x" */ #define UID_KEY_LEN 7 /* strlen of ";creduid=0x" */ #define CREDUID_KEY_LEN 11 /* strlen of ";user=" */ #define USER_KEY_LEN 6 /* strlen of ";pid=0x" */ #define PID_KEY_LEN 7 /* get a key struct with a SPNEGO security blob, suitable for session setup */ struct key * cifs_get_spnego_key(struct cifs_ses *sesInfo) { struct TCP_Server_Info *server = sesInfo->server; struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr; char *description, *dp; size_t desc_len; struct key *spnego_key; const char *hostname = server->hostname; /* length of fields (with semicolons): ver=0xyz ip4=ipaddress host=hostname sec=mechanism uid=0xFF user=username */ desc_len = MAX_VER_STR_LEN + HOST_KEY_LEN + strlen(hostname) + IP_KEY_LEN + INET6_ADDRSTRLEN + MAX_MECH_STR_LEN + UID_KEY_LEN + (sizeof(uid_t) * 2) + CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; if (sesInfo->user_name) desc_len += USER_KEY_LEN + strlen(sesInfo->user_name); spnego_key = ERR_PTR(-ENOMEM); description = kzalloc(desc_len, GFP_KERNEL); if (description == NULL) goto out; dp = description; /* start with version and hostname portion of UNC string */ spnego_key = ERR_PTR(-EINVAL); sprintf(dp, "ver=0x%x;host=%s;", CIFS_SPNEGO_UPCALL_VERSION, hostname); dp = description + strlen(description); /* add the server address */ if (server->dstaddr.ss_family == AF_INET) sprintf(dp, "ip4=%pI4", &sa->sin_addr); else if (server->dstaddr.ss_family == AF_INET6) sprintf(dp, "ip6=%pI6", &sa6->sin6_addr); else goto out; dp = description + strlen(description); /* for now, only sec=krb5 and sec=mskrb5 are valid */ if 
(server->sec_kerberos) sprintf(dp, ";sec=krb5"); else if (server->sec_mskerberos) sprintf(dp, ";sec=mskrb5"); else goto out; dp = description + strlen(description); sprintf(dp, ";uid=0x%x", from_kuid_munged(&init_user_ns, sesInfo->linux_uid)); dp = description + strlen(description); sprintf(dp, ";creduid=0x%x", from_kuid_munged(&init_user_ns, sesInfo->cred_uid)); if (sesInfo->user_name) { dp = description + strlen(description); sprintf(dp, ";user=%s", sesInfo->user_name); } dp = description + strlen(description); sprintf(dp, ";pid=0x%x", current->pid); cifs_dbg(FYI, "key description = %s\n", description); spnego_key = request_key(&cifs_spnego_key_type, description, ""); #ifdef CONFIG_CIFS_DEBUG2 if (cifsFYI && !IS_ERR(spnego_key)) { struct cifs_spnego_msg *msg = spnego_key->payload.data; cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U, msg->secblob_len + msg->sesskey_len)); } #endif /* CONFIG_CIFS_DEBUG2 */ out: kfree(description); return spnego_key; }
gpl-2.0
allwinner-ics/lichee_linux-3.0
drivers/input/keyboard/gpio_keys_polled.c
3269
6521
/* * Driver for buttons on GPIO lines not capable of generating interrupts * * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org> * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com> * * This file was based on: /drivers/input/misc/cobalt_btns.c * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> * * also was based on: /drivers/input/keyboard/gpio_keys.c * Copyright 2005 Phil Blundell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/input-polldev.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> #define DRV_NAME "gpio-keys-polled" struct gpio_keys_button_data { int last_state; int count; int threshold; int can_sleep; }; struct gpio_keys_polled_dev { struct input_polled_dev *poll_dev; struct device *dev; struct gpio_keys_platform_data *pdata; struct gpio_keys_button_data data[0]; }; static void gpio_keys_polled_check_state(struct input_dev *input, struct gpio_keys_button *button, struct gpio_keys_button_data *bdata) { int state; if (bdata->can_sleep) state = !!gpio_get_value_cansleep(button->gpio); else state = !!gpio_get_value(button->gpio); if (state != bdata->last_state) { unsigned int type = button->type ?: EV_KEY; input_event(input, type, button->code, !!(state ^ button->active_low)); input_sync(input); bdata->count = 0; bdata->last_state = state; } } static void gpio_keys_polled_poll(struct input_polled_dev *dev) { struct gpio_keys_polled_dev *bdev = dev->private; struct gpio_keys_platform_data *pdata = bdev->pdata; struct input_dev *input = dev->input; int i; for (i = 0; i < bdev->pdata->nbuttons; i++) { struct gpio_keys_button_data *bdata = &bdev->data[i]; if (bdata->count < bdata->threshold) 
bdata->count++; else gpio_keys_polled_check_state(input, &pdata->buttons[i], bdata); } } static void gpio_keys_polled_open(struct input_polled_dev *dev) { struct gpio_keys_polled_dev *bdev = dev->private; struct gpio_keys_platform_data *pdata = bdev->pdata; if (pdata->enable) pdata->enable(bdev->dev); } static void gpio_keys_polled_close(struct input_polled_dev *dev) { struct gpio_keys_polled_dev *bdev = dev->private; struct gpio_keys_platform_data *pdata = bdev->pdata; if (pdata->disable) pdata->disable(bdev->dev); } static int __devinit gpio_keys_polled_probe(struct platform_device *pdev) { struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct gpio_keys_polled_dev *bdev; struct input_polled_dev *poll_dev; struct input_dev *input; int error; int i; if (!pdata || !pdata->poll_interval) return -EINVAL; bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) + pdata->nbuttons * sizeof(struct gpio_keys_button_data), GFP_KERNEL); if (!bdev) { dev_err(dev, "no memory for private data\n"); return -ENOMEM; } poll_dev = input_allocate_polled_device(); if (!poll_dev) { dev_err(dev, "no memory for polled device\n"); error = -ENOMEM; goto err_free_bdev; } poll_dev->private = bdev; poll_dev->poll = gpio_keys_polled_poll; poll_dev->poll_interval = pdata->poll_interval; poll_dev->open = gpio_keys_polled_open; poll_dev->close = gpio_keys_polled_close; input = poll_dev->input; input->evbit[0] = BIT(EV_KEY); input->name = pdev->name; input->phys = DRV_NAME"/input0"; input->dev.parent = &pdev->dev; input->id.bustype = BUS_HOST; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; for (i = 0; i < pdata->nbuttons; i++) { struct gpio_keys_button *button = &pdata->buttons[i]; struct gpio_keys_button_data *bdata = &bdev->data[i]; unsigned int gpio = button->gpio; unsigned int type = button->type ?: EV_KEY; if (button->wakeup) { dev_err(dev, DRV_NAME " does not support wakeup\n"); error = -EINVAL; goto 
err_free_gpio; } error = gpio_request(gpio, button->desc ? button->desc : DRV_NAME); if (error) { dev_err(dev, "unable to claim gpio %u, err=%d\n", gpio, error); goto err_free_gpio; } error = gpio_direction_input(gpio); if (error) { dev_err(dev, "unable to set direction on gpio %u, err=%d\n", gpio, error); goto err_free_gpio; } bdata->can_sleep = gpio_cansleep(gpio); bdata->last_state = -1; bdata->threshold = DIV_ROUND_UP(button->debounce_interval, pdata->poll_interval); input_set_capability(input, type, button->code); } bdev->poll_dev = poll_dev; bdev->dev = dev; bdev->pdata = pdata; platform_set_drvdata(pdev, bdev); error = input_register_polled_device(poll_dev); if (error) { dev_err(dev, "unable to register polled device, err=%d\n", error); goto err_free_gpio; } /* report initial state of the buttons */ for (i = 0; i < pdata->nbuttons; i++) gpio_keys_polled_check_state(input, &pdata->buttons[i], &bdev->data[i]); return 0; err_free_gpio: while (--i >= 0) gpio_free(pdata->buttons[i].gpio); input_free_polled_device(poll_dev); err_free_bdev: kfree(bdev); platform_set_drvdata(pdev, NULL); return error; } static int __devexit gpio_keys_polled_remove(struct platform_device *pdev) { struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev); struct gpio_keys_platform_data *pdata = bdev->pdata; int i; input_unregister_polled_device(bdev->poll_dev); for (i = 0; i < pdata->nbuttons; i++) gpio_free(pdata->buttons[i].gpio); input_free_polled_device(bdev->poll_dev); kfree(bdev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver gpio_keys_polled_driver = { .probe = gpio_keys_polled_probe, .remove = __devexit_p(gpio_keys_polled_remove), .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; static int __init gpio_keys_polled_init(void) { return platform_driver_register(&gpio_keys_polled_driver); } static void __exit gpio_keys_polled_exit(void) { platform_driver_unregister(&gpio_keys_polled_driver); } module_init(gpio_keys_polled_init); 
module_exit(gpio_keys_polled_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); MODULE_DESCRIPTION("Polled GPIO Buttons driver"); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
ruffy91/u0lte_kernel
drivers/media/dvb/frontends/mt352.c
3269
14571
/* * Driver for Zarlink DVB-T MT352 demodulator * * Written by Holger Waechtler <holger@qanu.de> * and Daniel Mack <daniel@qanu.de> * * AVerMedia AVerTV DVB-T 771 support by * Wolfram Joost <dbox2@frokaschwei.de> * * Support for Samsung TDTC9251DH01C(M) tuner * Copyright (C) 2004 Antonio Mancuso <antonio.mancuso@digitaltelevision.it> * Amauri Celani <acelani@essegi.net> * * DVICO FusionHDTV DVB-T1 and DVICO FusionHDTV DVB-T Lite support by * Christopher Pascoe <c.pascoe@itee.uq.edu.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.= */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "mt352_priv.h" #include "mt352.h" struct mt352_state { struct i2c_adapter* i2c; struct dvb_frontend frontend; /* configuration settings */ struct mt352_config config; }; static int debug; #define dprintk(args...) 
\ do { \ if (debug) printk(KERN_DEBUG "mt352: " args); \ } while (0) static int mt352_single_write(struct dvb_frontend *fe, u8 reg, u8 val) { struct mt352_state* state = fe->demodulator_priv; u8 buf[2] = { reg, val }; struct i2c_msg msg = { .addr = state->config.demod_address, .flags = 0, .buf = buf, .len = 2 }; int err = i2c_transfer(state->i2c, &msg, 1); if (err != 1) { printk("mt352_write() to reg %x failed (err = %d)!\n", reg, err); return err; } return 0; } static int _mt352_write(struct dvb_frontend* fe, const u8 ibuf[], int ilen) { int err,i; for (i=0; i < ilen-1; i++) if ((err = mt352_single_write(fe,ibuf[0]+i,ibuf[i+1]))) return err; return 0; } static int mt352_read_register(struct mt352_state* state, u8 reg) { int ret; u8 b0 [] = { reg }; u8 b1 [] = { 0 }; struct i2c_msg msg [] = { { .addr = state->config.demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk("%s: readreg error (reg=%d, ret==%i)\n", __func__, reg, ret); return ret; } return b1[0]; } static int mt352_sleep(struct dvb_frontend* fe) { static u8 mt352_softdown[] = { CLOCK_CTL, 0x20, 0x08 }; _mt352_write(fe, mt352_softdown, sizeof(mt352_softdown)); return 0; } static void mt352_calc_nominal_rate(struct mt352_state* state, enum fe_bandwidth bandwidth, unsigned char *buf) { u32 adc_clock = 20480; /* 20.340 MHz */ u32 bw,value; switch (bandwidth) { case BANDWIDTH_6_MHZ: bw = 6; break; case BANDWIDTH_7_MHZ: bw = 7; break; case BANDWIDTH_8_MHZ: default: bw = 8; break; } if (state->config.adc_clock) adc_clock = state->config.adc_clock; value = 64 * bw * (1<<16) / (7 * 8); value = value * 1000 / adc_clock; dprintk("%s: bw %d, adc_clock %d => 0x%x\n", __func__, bw, adc_clock, value); buf[0] = msb(value); buf[1] = lsb(value); } static void mt352_calc_input_freq(struct mt352_state* state, unsigned char *buf) { int adc_clock = 20480; /* 20.480000 MHz */ int if2 = 
36167; /* 36.166667 MHz */ int ife,value; if (state->config.adc_clock) adc_clock = state->config.adc_clock; if (state->config.if2) if2 = state->config.if2; if (adc_clock >= if2 * 2) ife = if2; else { ife = adc_clock - (if2 % adc_clock); if (ife > adc_clock / 2) ife = adc_clock - ife; } value = -16374 * ife / adc_clock; dprintk("%s: if2 %d, ife %d, adc_clock %d => %d / 0x%x\n", __func__, if2, ife, adc_clock, value, value & 0x3fff); buf[0] = msb(value); buf[1] = lsb(value); } static int mt352_set_parameters(struct dvb_frontend* fe, struct dvb_frontend_parameters *param) { struct mt352_state* state = fe->demodulator_priv; unsigned char buf[13]; static unsigned char tuner_go[] = { 0x5d, 0x01 }; static unsigned char fsm_go[] = { 0x5e, 0x01 }; unsigned int tps = 0; struct dvb_ofdm_parameters *op = &param->u.ofdm; switch (op->code_rate_HP) { case FEC_2_3: tps |= (1 << 7); break; case FEC_3_4: tps |= (2 << 7); break; case FEC_5_6: tps |= (3 << 7); break; case FEC_7_8: tps |= (4 << 7); break; case FEC_1_2: case FEC_AUTO: break; default: return -EINVAL; } switch (op->code_rate_LP) { case FEC_2_3: tps |= (1 << 4); break; case FEC_3_4: tps |= (2 << 4); break; case FEC_5_6: tps |= (3 << 4); break; case FEC_7_8: tps |= (4 << 4); break; case FEC_1_2: case FEC_AUTO: break; case FEC_NONE: if (op->hierarchy_information == HIERARCHY_AUTO || op->hierarchy_information == HIERARCHY_NONE) break; default: return -EINVAL; } switch (op->constellation) { case QPSK: break; case QAM_AUTO: case QAM_16: tps |= (1 << 13); break; case QAM_64: tps |= (2 << 13); break; default: return -EINVAL; } switch (op->transmission_mode) { case TRANSMISSION_MODE_2K: case TRANSMISSION_MODE_AUTO: break; case TRANSMISSION_MODE_8K: tps |= (1 << 0); break; default: return -EINVAL; } switch (op->guard_interval) { case GUARD_INTERVAL_1_32: case GUARD_INTERVAL_AUTO: break; case GUARD_INTERVAL_1_16: tps |= (1 << 2); break; case GUARD_INTERVAL_1_8: tps |= (2 << 2); break; case GUARD_INTERVAL_1_4: tps |= (3 << 2); break; 
default: return -EINVAL; } switch (op->hierarchy_information) { case HIERARCHY_AUTO: case HIERARCHY_NONE: break; case HIERARCHY_1: tps |= (1 << 10); break; case HIERARCHY_2: tps |= (2 << 10); break; case HIERARCHY_4: tps |= (3 << 10); break; default: return -EINVAL; } buf[0] = TPS_GIVEN_1; /* TPS_GIVEN_1 and following registers */ buf[1] = msb(tps); /* TPS_GIVEN_(1|0) */ buf[2] = lsb(tps); buf[3] = 0x50; // old // buf[3] = 0xf4; // pinnacle mt352_calc_nominal_rate(state, op->bandwidth, buf+4); mt352_calc_input_freq(state, buf+6); if (state->config.no_tuner) { if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, param); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } _mt352_write(fe, buf, 8); _mt352_write(fe, fsm_go, 2); } else { if (fe->ops.tuner_ops.calc_regs) { fe->ops.tuner_ops.calc_regs(fe, param, buf+8, 5); buf[8] <<= 1; _mt352_write(fe, buf, sizeof(buf)); _mt352_write(fe, tuner_go, 2); } } return 0; } static int mt352_get_parameters(struct dvb_frontend* fe, struct dvb_frontend_parameters *param) { struct mt352_state* state = fe->demodulator_priv; u16 tps; u16 div; u8 trl; struct dvb_ofdm_parameters *op = &param->u.ofdm; static const u8 tps_fec_to_api[8] = { FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_7_8, FEC_AUTO, FEC_AUTO, FEC_AUTO }; if ( (mt352_read_register(state,0x00) & 0xC0) != 0xC0 ) return -EINVAL; /* Use TPS_RECEIVED-registers, not the TPS_CURRENT-registers because * the mt352 sometimes works with the wrong parameters */ tps = (mt352_read_register(state, TPS_RECEIVED_1) << 8) | mt352_read_register(state, TPS_RECEIVED_0); div = (mt352_read_register(state, CHAN_START_1) << 8) | mt352_read_register(state, CHAN_START_0); trl = mt352_read_register(state, TRL_NOMINAL_RATE_1); op->code_rate_HP = tps_fec_to_api[(tps >> 7) & 7]; op->code_rate_LP = tps_fec_to_api[(tps >> 4) & 7]; switch ( (tps >> 13) & 3) { case 0: op->constellation = QPSK; break; case 1: op->constellation = QAM_16; break; case 2: op->constellation = QAM_64; break; 
default: op->constellation = QAM_AUTO; break; } op->transmission_mode = (tps & 0x01) ? TRANSMISSION_MODE_8K : TRANSMISSION_MODE_2K; switch ( (tps >> 2) & 3) { case 0: op->guard_interval = GUARD_INTERVAL_1_32; break; case 1: op->guard_interval = GUARD_INTERVAL_1_16; break; case 2: op->guard_interval = GUARD_INTERVAL_1_8; break; case 3: op->guard_interval = GUARD_INTERVAL_1_4; break; default: op->guard_interval = GUARD_INTERVAL_AUTO; break; } switch ( (tps >> 10) & 7) { case 0: op->hierarchy_information = HIERARCHY_NONE; break; case 1: op->hierarchy_information = HIERARCHY_1; break; case 2: op->hierarchy_information = HIERARCHY_2; break; case 3: op->hierarchy_information = HIERARCHY_4; break; default: op->hierarchy_information = HIERARCHY_AUTO; break; } param->frequency = ( 500 * (div - IF_FREQUENCYx6) ) / 3 * 1000; if (trl == 0x72) op->bandwidth = BANDWIDTH_8_MHZ; else if (trl == 0x64) op->bandwidth = BANDWIDTH_7_MHZ; else op->bandwidth = BANDWIDTH_6_MHZ; if (mt352_read_register(state, STATUS_2) & 0x02) param->inversion = INVERSION_OFF; else param->inversion = INVERSION_ON; return 0; } static int mt352_read_status(struct dvb_frontend* fe, fe_status_t* status) { struct mt352_state* state = fe->demodulator_priv; int s0, s1, s3; /* FIXME: * * The MT352 design manual from Zarlink states (page 46-47): * * Notes about the TUNER_GO register: * * If the Read_Tuner_Byte (bit-1) is activated, then the tuner status * byte is copied from the tuner to the STATUS_3 register and * completion of the read operation is indicated by bit-5 of the * INTERRUPT_3 register. 
*/ if ((s0 = mt352_read_register(state, STATUS_0)) < 0) return -EREMOTEIO; if ((s1 = mt352_read_register(state, STATUS_1)) < 0) return -EREMOTEIO; if ((s3 = mt352_read_register(state, STATUS_3)) < 0) return -EREMOTEIO; *status = 0; if (s0 & (1 << 4)) *status |= FE_HAS_CARRIER; if (s0 & (1 << 1)) *status |= FE_HAS_VITERBI; if (s0 & (1 << 5)) *status |= FE_HAS_LOCK; if (s1 & (1 << 1)) *status |= FE_HAS_SYNC; if (s3 & (1 << 6)) *status |= FE_HAS_SIGNAL; if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) != (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) *status &= ~FE_HAS_LOCK; return 0; } static int mt352_read_ber(struct dvb_frontend* fe, u32* ber) { struct mt352_state* state = fe->demodulator_priv; *ber = (mt352_read_register (state, RS_ERR_CNT_2) << 16) | (mt352_read_register (state, RS_ERR_CNT_1) << 8) | (mt352_read_register (state, RS_ERR_CNT_0)); return 0; } static int mt352_read_signal_strength(struct dvb_frontend* fe, u16* strength) { struct mt352_state* state = fe->demodulator_priv; /* align the 12 bit AGC gain with the most significant bits */ u16 signal = ((mt352_read_register(state, AGC_GAIN_1) & 0x0f) << 12) | (mt352_read_register(state, AGC_GAIN_0) << 4); /* inverse of gain is signal strength */ *strength = ~signal; return 0; } static int mt352_read_snr(struct dvb_frontend* fe, u16* snr) { struct mt352_state* state = fe->demodulator_priv; u8 _snr = mt352_read_register (state, SNR); *snr = (_snr << 8) | _snr; return 0; } static int mt352_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks) { struct mt352_state* state = fe->demodulator_priv; *ucblocks = (mt352_read_register (state, RS_UBC_1) << 8) | (mt352_read_register (state, RS_UBC_0)); return 0; } static int mt352_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fe_tune_settings) { fe_tune_settings->min_delay_ms = 800; fe_tune_settings->step_size = 0; fe_tune_settings->max_drift = 0; return 0; } static int mt352_init(struct dvb_frontend* fe) { struct 
mt352_state* state = fe->demodulator_priv; static u8 mt352_reset_attach [] = { RESET, 0xC0 }; dprintk("%s: hello\n",__func__); if ((mt352_read_register(state, CLOCK_CTL) & 0x10) == 0 || (mt352_read_register(state, CONFIG) & 0x20) == 0) { /* Do a "hard" reset */ _mt352_write(fe, mt352_reset_attach, sizeof(mt352_reset_attach)); return state->config.demod_init(fe); } return 0; } static void mt352_release(struct dvb_frontend* fe) { struct mt352_state* state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops mt352_ops; struct dvb_frontend* mt352_attach(const struct mt352_config* config, struct i2c_adapter* i2c) { struct mt352_state* state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct mt352_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->i2c = i2c; memcpy(&state->config,config,sizeof(struct mt352_config)); /* check if the demod is there */ if (mt352_read_register(state, CHIP_ID) != ID_MT352) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &mt352_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static struct dvb_frontend_ops mt352_ops = { .info = { .name = "Zarlink MT352 DVB-T", .type = FE_OFDM, .frequency_min = 174000000, .frequency_max = 862000000, .frequency_stepsize = 166667, .frequency_tolerance = 0, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS }, .release = mt352_release, .init = mt352_init, .sleep = mt352_sleep, .write = _mt352_write, .set_frontend = mt352_set_parameters, .get_frontend = mt352_get_parameters, .get_tune_settings = mt352_get_tune_settings, .read_status = mt352_read_status, .read_ber = mt352_read_ber, 
.read_signal_strength = mt352_read_signal_strength, .read_snr = mt352_read_snr, .read_ucblocks = mt352_read_ucblocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Zarlink MT352 DVB-T Demodulator driver"); MODULE_AUTHOR("Holger Waechtler, Daniel Mack, Antonio Mancuso"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(mt352_attach);
gpl-2.0
c313742678/qt210_kernel
arch/sparc/lib/ksyms.c
3525
5184
/* * Export of symbols defined in assembler */ /* Tell string.h we don't want memcpy etc. as cpp defines */ #define EXPORT_SYMTAB_STROPS #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <asm/checksum.h> #include <asm/uaccess.h> #include <asm/ftrace.h> /* string functions */ EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(__strlen_user); EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(strncmp); /* mem* functions */ extern void *__memscan_zero(void *, size_t); extern void *__memscan_generic(void *, int, size_t); extern void *__bzero(void *, size_t); EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(__memscan_zero); EXPORT_SYMBOL(__memscan_generic); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__bzero); /* Moving data to/from/in userspace. */ EXPORT_SYMBOL(__strncpy_from_user); /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial); #ifdef CONFIG_MCOUNT EXPORT_SYMBOL(_mcount); #endif /* * sparc */ #ifdef CONFIG_SPARC32 extern int __ashrdi3(int, int); extern int __ashldi3(int, int); extern int __lshrdi3(int, int); extern int __muldi3(int, int); extern int __divdi3(int, int); extern void (*__copy_1page)(void *, const void *); extern void (*bzero_1page)(void *); extern int __strncmp(const char *, const char *, __kernel_size_t); extern void ___rw_read_enter(void); extern void ___rw_read_try(void); extern void ___rw_read_exit(void); extern void ___rw_write_enter(void); extern void ___atomic24_add(void); extern void ___atomic24_sub(void); /* Alias functions whose names begin with "." and export the aliases. * The module references will be fixed up by module_frob_arch_sections. */ extern int _Div(int, int); extern int _Mul(int, int); extern int _Rem(int, int); extern unsigned _Udiv(unsigned, unsigned); extern unsigned _Umul(unsigned, unsigned); extern unsigned _Urem(unsigned, unsigned); /* Networking helper routines. 
*/ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic); /* Special internal versions of library functions. */ EXPORT_SYMBOL(__copy_1page); EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(bzero_1page); /* string functions */ EXPORT_SYMBOL(__strncmp); /* Moving data to/from/in userspace. */ EXPORT_SYMBOL(__copy_user); /* Used by asm/spinlock.h */ #ifdef CONFIG_SMP EXPORT_SYMBOL(___rw_read_enter); EXPORT_SYMBOL(___rw_read_try); EXPORT_SYMBOL(___rw_read_exit); EXPORT_SYMBOL(___rw_write_enter); #endif /* Atomic operations. */ EXPORT_SYMBOL(___atomic24_add); EXPORT_SYMBOL(___atomic24_sub); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__divdi3); EXPORT_SYMBOL(_Rem); EXPORT_SYMBOL(_Urem); EXPORT_SYMBOL(_Mul); EXPORT_SYMBOL(_Umul); EXPORT_SYMBOL(_Div); EXPORT_SYMBOL(_Udiv); #endif /* * sparc64 */ #ifdef CONFIG_SPARC64 /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(__csum_partial_copy_from_user); EXPORT_SYMBOL(__csum_partial_copy_to_user); EXPORT_SYMBOL(ip_fast_csum); /* Moving data to/from/in userspace. */ EXPORT_SYMBOL(___copy_to_user); EXPORT_SYMBOL(___copy_from_user); EXPORT_SYMBOL(___copy_in_user); EXPORT_SYMBOL(__clear_user); /* RW semaphores */ EXPORT_SYMBOL(__down_read); EXPORT_SYMBOL(__down_read_trylock); EXPORT_SYMBOL(__down_write); EXPORT_SYMBOL(__down_write_trylock); EXPORT_SYMBOL(__up_read); EXPORT_SYMBOL(__up_write); EXPORT_SYMBOL(__downgrade_write); /* Atomic counter implementation. */ EXPORT_SYMBOL(atomic_add); EXPORT_SYMBOL(atomic_add_ret); EXPORT_SYMBOL(atomic_sub); EXPORT_SYMBOL(atomic_sub_ret); EXPORT_SYMBOL(atomic64_add); EXPORT_SYMBOL(atomic64_add_ret); EXPORT_SYMBOL(atomic64_sub); EXPORT_SYMBOL(atomic64_sub_ret); /* Atomic bit operations. 
*/ EXPORT_SYMBOL(test_and_set_bit); EXPORT_SYMBOL(test_and_clear_bit); EXPORT_SYMBOL(test_and_change_bit); EXPORT_SYMBOL(set_bit); EXPORT_SYMBOL(clear_bit); EXPORT_SYMBOL(change_bit); /* Special internal versions of library functions. */ EXPORT_SYMBOL(_clear_page); EXPORT_SYMBOL(clear_user_page); EXPORT_SYMBOL(copy_user_page); /* RAID code needs this */ void VISenter(void); EXPORT_SYMBOL(VISenter); extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *); extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); EXPORT_SYMBOL(xor_vis_2); EXPORT_SYMBOL(xor_vis_3); EXPORT_SYMBOL(xor_vis_4); EXPORT_SYMBOL(xor_vis_5); extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *); extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *); extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *, unsigned long *, unsigned long *, unsigned long *); EXPORT_SYMBOL(xor_niagara_2); EXPORT_SYMBOL(xor_niagara_3); EXPORT_SYMBOL(xor_niagara_4); EXPORT_SYMBOL(xor_niagara_5); #endif
gpl-2.0
kozmikkick/KozmiKKerneL-KitKat
drivers/mtd/maps/intel_vr_nor.c
4805
7271
/* * drivers/mtd/maps/intel_vr_nor.c * * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel * Vermilion Range chipset. * * The Vermilion Range Expansion Bus supports four chip selects, each of which * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device * is a 256MiB memory region containing the address spaces for all four of the * chip selects, with start addresses hardcoded on 64MiB boundaries. * * This map driver only supports NOR flash on chip select 0. The buswidth * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does * not modify the value in the EXP_TIMING_CS0 register except to enable writing * and disable boot acceleration. The timing parameters in the register are * assumed to have been properly initialized by the BIOS. The reset default * timing parameters are maximally conservative (slow), so access to the flash * will be slower than it should be if the BIOS has not initialized the timing * parameters. * * Author: Andy Lowe <alowe@mvista.com> * * 2006 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/cfi.h> #include <linux/mtd/flashchip.h> #define DRV_NAME "vr_nor" struct vr_nor_mtd { void __iomem *csr_base; struct map_info map; struct mtd_info *info; struct pci_dev *dev; }; /* Expansion Bus Configuration and Status Registers are in BAR 0 */ #define EXP_CSR_MBAR 0 /* Expansion Bus Memory Window is BAR 1 */ #define EXP_WIN_MBAR 1 /* Maximum address space for Chip Select 0 is 64MiB */ #define CS0_SIZE 0x04000000 /* Chip Select 0 is at offset 0 in the Memory Window */ #define CS0_START 0x0 /* Chip Select 0 Timing Register is at offset 0 in CSR */ #define EXP_TIMING_CS0 0x00 #define TIMING_CS_EN (1 << 31) /* Chip Select Enable */ #define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */ #define TIMING_WR_EN (1 << 1) /* Write Enable */ #define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */ #define TIMING_MASK 0x3FFF0000 static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p) { mtd_device_unregister(p->info); } static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) { /* register the flash bank */ /* partition the flash bank */ return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0); } static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) { map_destroy(p->info); } static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p) { static const char *probe_types[] = { "cfi_probe", "jedec_probe", NULL }; const char **type; for (type = probe_types; !p->info && *type; type++) p->info = do_map_probe(*type, &p->map); if (!p->info) return -ENODEV; p->info->owner = THIS_MODULE; return 0; } static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p) { unsigned int exp_timing_cs0; /* write-protect the flash bank */ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0); exp_timing_cs0 &= 
~TIMING_WR_EN; writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0); /* unmap the flash window */ iounmap(p->map.virt); /* unmap the csr window */ iounmap(p->csr_base); } /* * Initialize the map_info structure and map the flash. * Returns 0 on success, nonzero otherwise. */ static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p) { unsigned long csr_phys, csr_len; unsigned long win_phys, win_len; unsigned int exp_timing_cs0; int err; csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR); csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR); win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR); win_len = pci_resource_len(p->dev, EXP_WIN_MBAR); if (!csr_phys || !csr_len || !win_phys || !win_len) return -ENODEV; if (win_len < (CS0_START + CS0_SIZE)) return -ENXIO; p->csr_base = ioremap_nocache(csr_phys, csr_len); if (!p->csr_base) return -ENOMEM; exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0); if (!(exp_timing_cs0 & TIMING_CS_EN)) { dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 " "is disabled.\n"); err = -ENODEV; goto release; } if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) { dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 " "is configured for maximally slow access times.\n"); } p->map.name = DRV_NAME; p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 
1 : 2; p->map.phys = win_phys + CS0_START; p->map.size = CS0_SIZE; p->map.virt = ioremap_nocache(p->map.phys, p->map.size); if (!p->map.virt) { err = -ENOMEM; goto release; } simple_map_init(&p->map); /* Enable writes to flash bank */ exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN; writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0); return 0; release: iounmap(p->csr_base); return err; } static struct pci_device_id vr_nor_pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)}, {0,} }; static void __devexit vr_nor_pci_remove(struct pci_dev *dev) { struct vr_nor_mtd *p = pci_get_drvdata(dev); pci_set_drvdata(dev, NULL); vr_nor_destroy_partitions(p); vr_nor_destroy_mtd_setup(p); vr_nor_destroy_maps(p); kfree(p); pci_release_regions(dev); pci_disable_device(dev); } static int __devinit vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct vr_nor_mtd *p = NULL; unsigned int exp_timing_cs0; int err; err = pci_enable_device(dev); if (err) goto out; err = pci_request_regions(dev, DRV_NAME); if (err) goto disable_dev; p = kzalloc(sizeof(*p), GFP_KERNEL); err = -ENOMEM; if (!p) goto release; p->dev = dev; err = vr_nor_init_maps(p); if (err) goto release; err = vr_nor_mtd_setup(p); if (err) goto destroy_maps; err = vr_nor_init_partitions(p); if (err) goto destroy_mtd_setup; pci_set_drvdata(dev, p); return 0; destroy_mtd_setup: map_destroy(p->info); destroy_maps: /* write-protect the flash bank */ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0); exp_timing_cs0 &= ~TIMING_WR_EN; writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0); /* unmap the flash window */ iounmap(p->map.virt); /* unmap the csr window */ iounmap(p->csr_base); release: kfree(p); pci_release_regions(dev); disable_dev: pci_disable_device(dev); out: return err; } static struct pci_driver vr_nor_pci_driver = { .name = DRV_NAME, .probe = vr_nor_pci_probe, .remove = __devexit_p(vr_nor_pci_remove), .id_table = vr_nor_pci_ids, }; static int __init vr_nor_mtd_init(void) { 
return pci_register_driver(&vr_nor_pci_driver); } static void __exit vr_nor_mtd_exit(void) { pci_unregister_driver(&vr_nor_pci_driver); } module_init(vr_nor_mtd_init); module_exit(vr_nor_mtd_exit); MODULE_AUTHOR("Andy Lowe"); MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
gpl-2.0
varunfsl/fsl_pamu
drivers/pcmcia/pxa2xx_sharpsl.c
4805
7191
/*
 * Sharp SL-C7xx Series PCMCIA routines
 *
 * Copyright (c) 2004-2005 Richard Purdie
 *
 * Based on Sharp's 2.4 kernel patches and pxa2xx_mainstone.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>

#include <asm/hardware/scoop.h>

#include "soc_common.h"

/* Flag value stored in scoop_pcmcia_dev->keep_vs meaning "do not latch VS1/VS2" */
#define NO_KEEP_VS 0x0001

/* Shorthand for the board's array of SCOOP PCMCIA slot descriptors */
#define SCOOP_DEV platform_scoop_config->devs

/*
 * Reset the SCOOP companion chip for one socket and drop card power,
 * clearing the cached VS latch and read-enable state.
 */
static void sharpsl_pcmcia_init_reset(struct soc_pcmcia_socket *skt)
{
	struct scoop_pcmcia_dev *scoopdev = &SCOOP_DEV[skt->nr];

	reset_scoop(scoopdev->dev);

	/* Shared power controls need to be handled carefully */
	if (platform_scoop_config->power_ctrl)
		platform_scoop_config->power_ctrl(scoopdev->dev, 0x0000, skt->nr);
	else
		write_scoop_reg(scoopdev->dev, SCOOP_CPR, 0x0000);

	scoopdev->keep_vs = NO_KEEP_VS;
	scoopdev->keep_rd = 0;
}

/*
 * Wire up the per-socket IRQs (optional card-detect IRQ plus the main
 * status-change IRQ) from the board's SCOOP slot description.
 */
static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	if (SCOOP_DEV[skt->nr].cd_irq >= 0) {
		skt->stat[SOC_STAT_CD].irq = SCOOP_DEV[skt->nr].cd_irq;
		skt->stat[SOC_STAT_CD].name = SCOOP_DEV[skt->nr].cd_irq_str;
	}

	skt->socket.pci_irq = SCOOP_DEV[skt->nr].irq;

	return 0;
}

/*
 * Read the socket status from the SCOOP CSR register and translate it
 * into the generic pcmcia_state bits.  The CDR writes in the branches
 * below manage voltage-sense latching across insert/eject/power-on; the
 * keep_vs cache preserves the sensed VS1/VS2 levels (CSR bits 0x00C0)
 * while the card is powered.
 */
static void sharpsl_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
					struct pcmcia_state *state)
{
	unsigned short cpr, csr;
	struct device *scoop = SCOOP_DEV[skt->nr].dev;

	cpr = read_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_CPR);

	/* Pulse IRM to refresh ISR before sampling CSR */
	write_scoop_reg(scoop, SCOOP_IRM, 0x00FF);
	write_scoop_reg(scoop, SCOOP_ISR, 0x0000);
	write_scoop_reg(scoop, SCOOP_IRM, 0x0000);
	csr = read_scoop_reg(scoop, SCOOP_CSR);
	if (csr & 0x0004) {
		/* card eject */
		write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
		SCOOP_DEV[skt->nr].keep_vs = NO_KEEP_VS;
	} else if (!(SCOOP_DEV[skt->nr].keep_vs & NO_KEEP_VS)) {
		/* keep vs1,vs2 */
		write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
		csr |= SCOOP_DEV[skt->nr].keep_vs;
	} else if (cpr & 0x0003) {
		/* power on */
		write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
		SCOOP_DEV[skt->nr].keep_vs = (csr & 0x00C0);
	} else {
		/* card detect */
		if ((machine_is_spitz() || machine_is_borzoi()) && skt->nr == 1) {
			write_scoop_reg(scoop, SCOOP_CDR, 0x0000);
		} else {
			write_scoop_reg(scoop, SCOOP_CDR, 0x0002);
		}
	}

	state->detect = (csr & 0x0004) ? 0 : 1;
	state->ready  = (csr & 0x0002) ? 1 : 0;
	state->bvd1   = (csr & 0x0010) ? 1 : 0;
	state->bvd2   = (csr & 0x0020) ? 1 : 0;
	state->wrprot = (csr & 0x0008) ? 1 : 0;
	state->vs_3v  = (csr & 0x0040) ? 0 : 1;
	state->vs_Xv  = (csr & 0x0080) ? 0 : 1;

	/* Power enabled but the expected power-good bits are not all set */
	if ((cpr & 0x0080) && ((cpr & 0x8040) != 0x8040)) {
		printk(KERN_ERR "sharpsl_pcmcia_socket_state(): CPR=%04X, Low voltage!\n", cpr);
	}
}

/*
 * Apply the requested Vcc/Vpp and socket flags by computing new values
 * for the SCOOP MCR/CPR/CCR/IMR registers and writing only the ones
 * that changed.  Returns 0 on success, -1 on an unsupported Vcc/Vpp.
 */
static int sharpsl_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
					   const socket_state_t *state)
{
	unsigned long flags;
	struct device *scoop = SCOOP_DEV[skt->nr].dev;

	unsigned short cpr, ncpr, ccr, nccr, mcr, nmcr, imr, nimr;

	switch (state->Vcc) {
	case 0:
		break;
	case 33:
		break;
	case 50:
		break;
	default:
		printk(KERN_ERR "sharpsl_pcmcia_configure_socket(): bad Vcc %u\n",
		       state->Vcc);
		return -1;
	}

	/* Vpp must either track Vcc or be off entirely */
	if ((state->Vpp != state->Vcc) && (state->Vpp != 0)) {
		printk(KERN_ERR "CF slot cannot support Vpp %u\n", state->Vpp);
		return -1;
	}

	local_irq_save(flags);

	nmcr = (mcr = read_scoop_reg(scoop, SCOOP_MCR)) & ~0x0010;
	ncpr = (cpr = read_scoop_reg(scoop, SCOOP_CPR)) & ~0x0083;
	nccr = (ccr = read_scoop_reg(scoop, SCOOP_CCR)) & ~0x0080;
	nimr = (imr = read_scoop_reg(scoop, SCOOP_IMR)) & ~0x003E;

	/*
	 * NOTE(review): on Spitz/Borzoi/Akita slot 0 both 33 and 50 select
	 * CPR bit 0x0002 — presumably intentional for that board's power
	 * wiring, but worth confirming against the board schematics.
	 */
	if ((machine_is_spitz() || machine_is_borzoi() || machine_is_akita()) &&
	    skt->nr == 0) {
		ncpr |= (state->Vcc == 33) ? 0x0002 :
			(state->Vcc == 50) ? 0x0002 : 0;
	} else {
		ncpr |= (state->Vcc == 33) ? 0x0001 :
			(state->Vcc == 50) ? 0x0002 : 0;
	}
	nmcr |= (state->flags & SS_IOCARD) ? 0x0010 : 0;
	ncpr |= (state->flags & SS_OUTPUT_ENA) ? 0x0080 : 0;
	nccr |= (state->flags & SS_RESET) ? 0x0080 : 0;
	nimr |= ((skt->status & SS_DETECT)  ? 0x0004 : 0) |
		((skt->status & SS_READY)   ? 0x0002 : 0) |
		((skt->status & SS_BATDEAD) ? 0x0010 : 0) |
		((skt->status & SS_BATWARN) ? 0x0020 : 0) |
		((skt->status & SS_STSCHG)  ? 0x0010 : 0) |
		((skt->status & SS_WRPROT)  ? 0x0008 : 0);

	/* Track CCR bit 0x0080 (read enable) across power transitions */
	if (!(ncpr & 0x0003)) {
		SCOOP_DEV[skt->nr].keep_rd = 0;
	} else if (!SCOOP_DEV[skt->nr].keep_rd) {
		if (nccr & 0x0080)
			SCOOP_DEV[skt->nr].keep_rd = 1;
		else
			nccr |= 0x0080;
	}

	if (mcr != nmcr)
		write_scoop_reg(scoop, SCOOP_MCR, nmcr);
	if (cpr != ncpr) {
		if (platform_scoop_config->power_ctrl)
			platform_scoop_config->power_ctrl(scoop, ncpr , skt->nr);
		else
			write_scoop_reg(scoop, SCOOP_CPR, ncpr);
	}
	if (ccr != nccr)
		write_scoop_reg(scoop, SCOOP_CCR, nccr);
	if (imr != nimr)
		write_scoop_reg(scoop, SCOOP_IMR, nimr);

	local_irq_restore(flags);

	return 0;
}

/*
 * Bring the socket to a known state and unmask the card status
 * interrupts in the SCOOP IMR/MCR registers.
 */
static void sharpsl_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
{
	sharpsl_pcmcia_init_reset(skt);

	/* Enable interrupt */
	write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_IMR, 0x00C0);
	write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_MCR, 0x0101);
	SCOOP_DEV[skt->nr].keep_vs = NO_KEEP_VS;
}

/* On suspend, simply reset the socket back to its powered-off state. */
static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
{
	sharpsl_pcmcia_init_reset(skt);
}

/* Board-level operations handed to the generic soc_common PCMCIA core */
static struct pcmcia_low_level sharpsl_pcmcia_ops = {
	.owner			= THIS_MODULE,
	.hw_init		= sharpsl_pcmcia_hw_init,
	.socket_state		= sharpsl_pcmcia_socket_state,
	.configure_socket	= sharpsl_pcmcia_configure_socket,
	.socket_init		= sharpsl_pcmcia_socket_init,
	.socket_suspend		= sharpsl_pcmcia_socket_suspend,
	.first			= 0,
	.nr			= 0,	/* patched at init time from the platform data */
};

#ifdef CONFIG_SA1100_COLLIE
#include "sa11xx_base.h"

/* Collie uses the SA11xx PCMCIA core instead of a pxa2xx platform device. */
int pcmcia_collie_init(struct device *dev)
{
	int ret = -ENODEV;

	if (machine_is_collie())
		ret = sa11xx_drv_pcmcia_probe(dev, &sharpsl_pcmcia_ops, 0, 1);

	return ret;
}

#else

static struct platform_device *sharpsl_pcmcia_device;

/*
 * Register a "pxa2xx-pcmcia" platform device carrying sharpsl_pcmcia_ops
 * as platform data, with the number of sockets taken from the board's
 * SCOOP configuration.
 */
static int __init sharpsl_pcmcia_init(void)
{
	int ret;

	if (!platform_scoop_config)
		return -ENODEV;
	sharpsl_pcmcia_ops.nr = platform_scoop_config->num_devs;
	sharpsl_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);

	if (!sharpsl_pcmcia_device)
		return -ENOMEM;

	ret = platform_device_add_data(sharpsl_pcmcia_device,
				       &sharpsl_pcmcia_ops,
				       sizeof(sharpsl_pcmcia_ops));
	if (ret == 0) {
		sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev;
		ret = platform_device_add(sharpsl_pcmcia_device);
	}

	if (ret)
		platform_device_put(sharpsl_pcmcia_device);

	return ret;
}

static void __exit sharpsl_pcmcia_exit(void)
{
	platform_device_unregister(sharpsl_pcmcia_device);
}

fs_initcall(sharpsl_pcmcia_init);
module_exit(sharpsl_pcmcia_exit);
#endif

MODULE_DESCRIPTION("Sharp SL Series PCMCIA Support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
gpl-2.0
Rashed97/android_kernel_samsung_lt03lte
sound/soc/fsl/mpc8610_hpcd.c
4805
17451
/**
 * Freescale MPC8610HPCD ALSA SoC Machine driver
 *
 * Author: Timur Tabi <timur@freescale.com>
 *
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/of_i2c.h>
#include <sound/soc.h>
#include <asm/fsl_guts.h>

#include "fsl_dma.h"
#include "fsl_ssi.h"

/* There's only one global utilities register */
static phys_addr_t guts_phys;

#define DAI_NAME_SIZE	32

/**
 * mpc8610_hpcd_data: machine-specific ASoC device data
 *
 * This structure contains data for a single sound platform device on an
 * MPC8610 HPCD.  Some of the data is taken from the device tree.
 */
struct mpc8610_hpcd_data {
	struct snd_soc_dai_link dai[2];	/* [0] = playback link, [1] = capture link */
	struct snd_soc_card card;
	unsigned int dai_format;	/* SND_SOC_DAIFMT_* serial format + clock mastering */
	unsigned int codec_clk_direction;
	unsigned int cpu_clk_direction;
	unsigned int clk_frequency;	/* MCLK rate handed to the codec (slave modes) */
	unsigned int ssi_id;		/* 0 = SSI1, 1 = SSI2, etc */
	unsigned int dma_id[2];		/* 0 = DMA1, 1 = DMA2, etc */
	unsigned int dma_channel_id[2]; /* 0 = ch 0, 1 = ch 1, etc*/
	char codec_dai_name[DAI_NAME_SIZE];
	char codec_name[DAI_NAME_SIZE];
	char platform_name[2][DAI_NAME_SIZE]; /* One for each DMA channel */
};

/**
 * mpc8610_hpcd_machine_probe: initialize the board
 *
 * This function is used to initialize the board-specific hardware.
 *
 * Here we program the DMACR and PMUXCR registers.
 *
 * Returns 0 on success, -ENOMEM if the guts registers cannot be mapped.
 */
static int mpc8610_hpcd_machine_probe(struct snd_soc_card *card)
{
	struct mpc8610_hpcd_data *machine_data =
		container_of(card, struct mpc8610_hpcd_data, card);
	struct ccsr_guts __iomem *guts;

	guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
	if (!guts) {
		dev_err(card->dev, "could not map global utilities\n");
		return -ENOMEM;
	}

	/* Program the signal routing between the SSI and the DMA */
	guts_set_dmacr(guts, machine_data->dma_id[0],
		       machine_data->dma_channel_id[0],
		       CCSR_GUTS_DMACR_DEV_SSI);
	guts_set_dmacr(guts, machine_data->dma_id[1],
		       machine_data->dma_channel_id[1],
		       CCSR_GUTS_DMACR_DEV_SSI);

	guts_set_pmuxcr_dma(guts, machine_data->dma_id[0],
			    machine_data->dma_channel_id[0], 0);
	guts_set_pmuxcr_dma(guts, machine_data->dma_id[1],
			    machine_data->dma_channel_id[1], 0);

	/* Route the selected SSI's pins onto the pin mux */
	switch (machine_data->ssi_id) {
	case 0:
		clrsetbits_be32(&guts->pmuxcr,
				CCSR_GUTS_PMUXCR_SSI1_MASK, CCSR_GUTS_PMUXCR_SSI1_SSI);
		break;
	case 1:
		clrsetbits_be32(&guts->pmuxcr,
				CCSR_GUTS_PMUXCR_SSI2_MASK, CCSR_GUTS_PMUXCR_SSI2_SSI);
		break;
	}

	iounmap(guts);

	return 0;
}

/**
 * mpc8610_hpcd_startup: program the board with various hardware parameters
 *
 * This function takes board-specific information, like clock frequencies
 * and serial data formats, and passes that information to the codec and
 * transport drivers.
 *
 * Returns 0 on success, or the codec driver's negative error code.
 */
static int mpc8610_hpcd_startup(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct mpc8610_hpcd_data *machine_data =
		container_of(rtd->card, struct mpc8610_hpcd_data, card);
	struct device *dev = rtd->card->dev;
	int ret = 0;

	/* Tell the codec driver what the serial protocol is. */
	ret = snd_soc_dai_set_fmt(rtd->codec_dai, machine_data->dai_format);
	if (ret < 0) {
		dev_err(dev, "could not set codec driver audio format\n");
		return ret;
	}

	/*
	 * Tell the codec driver what the MCLK frequency is, and whether it's
	 * a slave or master.
	 */
	ret = snd_soc_dai_set_sysclk(rtd->codec_dai, 0,
				     machine_data->clk_frequency,
				     machine_data->codec_clk_direction);
	if (ret < 0) {
		dev_err(dev, "could not set codec driver clock params\n");
		return ret;
	}

	return 0;
}

/**
 * mpc8610_hpcd_machine_remove: Remove the sound device
 *
 * This function is called to remove the sound device for one SSI.  We
 * de-program the DMACR and PMUXCR register.
 *
 * Returns 0 on success, -ENOMEM if the guts registers cannot be mapped.
 */
static int mpc8610_hpcd_machine_remove(struct snd_soc_card *card)
{
	struct mpc8610_hpcd_data *machine_data =
		container_of(card, struct mpc8610_hpcd_data, card);
	struct ccsr_guts __iomem *guts;

	guts = ioremap(guts_phys, sizeof(struct ccsr_guts));
	if (!guts) {
		dev_err(card->dev, "could not map global utilities\n");
		return -ENOMEM;
	}

	/* Restore the signal routing */

	guts_set_dmacr(guts, machine_data->dma_id[0],
		       machine_data->dma_channel_id[0], 0);
	guts_set_dmacr(guts, machine_data->dma_id[1],
		       machine_data->dma_channel_id[1], 0);

	switch (machine_data->ssi_id) {
	case 0:
		clrsetbits_be32(&guts->pmuxcr,
				CCSR_GUTS_PMUXCR_SSI1_MASK, CCSR_GUTS_PMUXCR_SSI1_LA);
		break;
	case 1:
		clrsetbits_be32(&guts->pmuxcr,
				CCSR_GUTS_PMUXCR_SSI2_MASK, CCSR_GUTS_PMUXCR_SSI2_LA);
		break;
	}

	iounmap(guts);

	return 0;
}

/**
 * mpc8610_hpcd_ops: ASoC machine driver operations
 */
static struct snd_soc_ops mpc8610_hpcd_ops = {
	.startup = mpc8610_hpcd_startup,
};

/**
 * get_node_by_phandle_name - get a node by its phandle name
 *
 * This function takes a node, the name of a property in that node, and a
 * compatible string.  Assuming the property is a phandle to another node,
 * it returns that node, (optionally) if that node is compatible.
 *
 * If the property is not a phandle, or the node it points to is not compatible
 * with the specific string, then NULL is returned.
 *
 * On success the returned node's refcount is raised; the caller must
 * of_node_put() it.
 */
static struct device_node *get_node_by_phandle_name(struct device_node *np,
						    const char *name,
						    const char *compatible)
{
	const phandle *ph;
	int len;

	ph = of_get_property(np, name, &len);
	if (!ph || (len != sizeof(phandle)))
		return NULL;

	np = of_find_node_by_phandle(*ph);
	if (!np)
		return NULL;

	if (compatible && !of_device_is_compatible(np, compatible)) {
		of_node_put(np);
		return NULL;
	}

	return np;
}

/**
 * get_parent_cell_index -- return the cell-index of the parent of a node
 *
 * Return the value of the cell-index property of the parent of the given
 * node.  This is used for DMA channel nodes that need to know the DMA ID
 * of the controller they are on.  Returns -1 on failure.
 */
static int get_parent_cell_index(struct device_node *np)
{
	struct device_node *parent = of_get_parent(np);
	const u32 *iprop;

	if (!parent)
		return -1;

	iprop = of_get_property(parent, "cell-index", NULL);
	of_node_put(parent);

	if (!iprop)
		return -1;

	return be32_to_cpup(iprop);
}

/**
 * codec_node_dev_name - determine the dev_name for a codec node
 *
 * This function determines the dev_name for an I2C node.  This is the name
 * that would be returned by dev_name() if this device_node were part of a
 * 'struct device'  It's ugly and hackish, but it works.
 *
 * The dev_name for such devices include the bus number and I2C address. For
 * example, "cs4270.0-004f".
 *
 * Returns 0 on success and writes the name into @buf (at most @len bytes),
 * or a negative error code.
 */
static int codec_node_dev_name(struct device_node *np, char *buf, size_t len)
{
	const u32 *iprop;
	int addr;
	char temp[DAI_NAME_SIZE];
	struct i2c_client *i2c;

	of_modalias_node(np, temp, DAI_NAME_SIZE);

	iprop = of_get_property(np, "reg", NULL);
	if (!iprop)
		return -EINVAL;

	addr = be32_to_cpup(iprop);

	/* We need the adapter number */
	i2c = of_find_i2c_device_by_node(np);
	if (!i2c)
		return -ENODEV;

	snprintf(buf, len, "%s.%u-%04x", temp, i2c->adapter->nr, addr);

	return 0;
}

/*
 * Resolve the DMA channel phandle named @name on the SSI node, fill in the
 * DAI link's platform_name with its dev_name-style identifier, and return
 * the channel and controller indices through the out parameters.
 */
static int get_dma_channel(struct device_node *ssi_np,
			   const char *name,
			   struct snd_soc_dai_link *dai,
			   unsigned int *dma_channel_id,
			   unsigned int *dma_id)
{
	struct resource res;
	struct device_node *dma_channel_np;
	const u32 *iprop;
	int ret;

	dma_channel_np = get_node_by_phandle_name(ssi_np, name,
						  "fsl,ssi-dma-channel");
	if (!dma_channel_np)
		return -EINVAL;

	/* Determine the dev_name for the device_node.  This code mimics the
	 * behavior of of_device_make_bus_id(). We need this because ASoC uses
	 * the dev_name() of the device to match the platform (DMA) device with
	 * the CPU (SSI) device.  It's all ugly and hackish, but it works (for
	 * now).
	 *
	 * dai->platform name should already point to an allocated buffer.
	 */
	ret = of_address_to_resource(dma_channel_np, 0, &res);
	if (ret)
		return ret;
	snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%s",
		 (unsigned long long) res.start, dma_channel_np->name);

	iprop = of_get_property(dma_channel_np, "cell-index", NULL);
	if (!iprop) {
		of_node_put(dma_channel_np);
		return -EINVAL;
	}

	*dma_channel_id = be32_to_cpup(iprop);
	*dma_id = get_parent_cell_index(dma_channel_np);
	of_node_put(dma_channel_np);

	return 0;
}

/**
 * mpc8610_hpcd_probe: platform probe function for the machine driver
 *
 * Although this is a machine driver, the SSI node is the "master" node with
 * respect to audio hardware connections.  Therefore, we create a new ASoC
 * device for each new SSI node that has a codec attached.
 */
static int mpc8610_hpcd_probe(struct platform_device *pdev)
{
	struct device *dev = pdev->dev.parent;
	/* ssi_pdev is the platform device for the SSI node that probed us */
	struct platform_device *ssi_pdev =
		container_of(dev, struct platform_device, dev);
	struct device_node *np = ssi_pdev->dev.of_node;
	struct device_node *codec_np = NULL;
	struct platform_device *sound_device = NULL;
	struct mpc8610_hpcd_data *machine_data;
	int ret = -ENODEV;
	const char *sprop;
	const u32 *iprop;

	/* Find the codec node for this SSI. */
	codec_np = of_parse_phandle(np, "codec-handle", 0);
	if (!codec_np) {
		dev_err(dev, "invalid codec node\n");
		return -EINVAL;
	}

	machine_data = kzalloc(sizeof(struct mpc8610_hpcd_data), GFP_KERNEL);
	if (!machine_data) {
		ret = -ENOMEM;
		goto error_alloc;
	}

	machine_data->dai[0].cpu_dai_name = dev_name(&ssi_pdev->dev);
	machine_data->dai[0].ops = &mpc8610_hpcd_ops;

	/* Determine the codec name, it will be used as the codec DAI name */
	ret = codec_node_dev_name(codec_np, machine_data->codec_name,
				  DAI_NAME_SIZE);
	if (ret) {
		dev_err(&pdev->dev, "invalid codec node %s\n",
			codec_np->full_name);
		ret = -EINVAL;
		goto error;
	}
	machine_data->dai[0].codec_name = machine_data->codec_name;

	/* The DAI name from the codec (snd_soc_dai_driver.name) */
	machine_data->dai[0].codec_dai_name = "cs4270-hifi";

	/* We register two DAIs per SSI, one for playback and the other for
	 * capture.  Currently, we only support codecs that have one DAI for
	 * both playback and capture.
	 */
	memcpy(&machine_data->dai[1], &machine_data->dai[0],
	       sizeof(struct snd_soc_dai_link));

	/* Get the device ID */
	iprop = of_get_property(np, "cell-index", NULL);
	if (!iprop) {
		dev_err(&pdev->dev, "cell-index property not found\n");
		ret = -EINVAL;
		goto error;
	}
	machine_data->ssi_id = be32_to_cpup(iprop);

	/* Get the serial format and clock direction. */
	sprop = of_get_property(np, "fsl,mode", NULL);
	if (!sprop) {
		dev_err(&pdev->dev, "fsl,mode property not found\n");
		ret = -EINVAL;
		goto error;
	}

	if (strcasecmp(sprop, "i2s-slave") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;

		/* In i2s-slave mode, the codec has its own clock source, so we
		 * need to get the frequency from the device tree and pass it to
		 * the codec driver.
		 */
		iprop = of_get_property(codec_np, "clock-frequency", NULL);
		if (!iprop || !*iprop) {
			dev_err(&pdev->dev, "codec bus-frequency "
				"property is missing or invalid\n");
			ret = -EINVAL;
			goto error;
		}
		machine_data->clk_frequency = be32_to_cpup(iprop);
	} else if (strcasecmp(sprop, "i2s-master") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
	} else if (strcasecmp(sprop, "lj-slave") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
	} else if (strcasecmp(sprop, "lj-master") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
	} else if (strcasecmp(sprop, "rj-slave") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
	} else if (strcasecmp(sprop, "rj-master") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
	} else if (strcasecmp(sprop, "ac97-slave") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
	} else if (strcasecmp(sprop, "ac97-master") == 0) {
		machine_data->dai_format =
			SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
		machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
		machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
	} else {
		dev_err(&pdev->dev,
			"unrecognized fsl,mode property '%s'\n", sprop);
		ret = -EINVAL;
		goto error;
	}

	/*
	 * NOTE(review): clk_frequency is only set in the i2s-slave branch
	 * above, so this check appears to reject all other modes — confirm
	 * whether master/AC97 modes are actually expected on this board.
	 */
	if (!machine_data->clk_frequency) {
		dev_err(&pdev->dev, "unknown clock frequency\n");
		ret = -EINVAL;
		goto error;
	}

	/* Find the playback DMA channel to use. */
	machine_data->dai[0].platform_name = machine_data->platform_name[0];
	ret = get_dma_channel(np, "fsl,playback-dma", &machine_data->dai[0],
			      &machine_data->dma_channel_id[0],
			      &machine_data->dma_id[0]);
	if (ret) {
		dev_err(&pdev->dev, "missing/invalid playback DMA phandle\n");
		goto error;
	}

	/* Find the capture DMA channel to use. */
	machine_data->dai[1].platform_name = machine_data->platform_name[1];
	ret = get_dma_channel(np, "fsl,capture-dma", &machine_data->dai[1],
			      &machine_data->dma_channel_id[1],
			      &machine_data->dma_id[1]);
	if (ret) {
		dev_err(&pdev->dev, "missing/invalid capture DMA phandle\n");
		goto error;
	}

	/* Initialize our DAI data structure. */
	machine_data->dai[0].stream_name = "playback";
	machine_data->dai[1].stream_name = "capture";
	machine_data->dai[0].name = machine_data->dai[0].stream_name;
	machine_data->dai[1].name = machine_data->dai[1].stream_name;

	machine_data->card.probe = mpc8610_hpcd_machine_probe;
	machine_data->card.remove = mpc8610_hpcd_machine_remove;
	machine_data->card.name = pdev->name; /* The platform driver name */
	machine_data->card.num_links = 2;
	machine_data->card.dai_link = machine_data->dai;

	/* Allocate a new audio platform device structure */
	sound_device = platform_device_alloc("soc-audio", -1);
	if (!sound_device) {
		dev_err(&pdev->dev, "platform device alloc failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Associate the card data with the sound device */
	platform_set_drvdata(sound_device, &machine_data->card);

	/* Register with ASoC */
	ret = platform_device_add(sound_device);
	if (ret) {
		dev_err(&pdev->dev, "platform device add failed\n");
		goto error_sound;
	}
	dev_set_drvdata(&pdev->dev, sound_device);

	of_node_put(codec_np);

	return 0;

error_sound:
	platform_device_put(sound_device);
error:
	kfree(machine_data);
error_alloc:
	of_node_put(codec_np);
	return ret;
}

/**
 * mpc8610_hpcd_remove: remove the platform device
 *
 * This function is called when the platform device is removed.
 */
static int __devexit mpc8610_hpcd_remove(struct platform_device *pdev)
{
	struct platform_device *sound_device = dev_get_drvdata(&pdev->dev);
	struct snd_soc_card *card = platform_get_drvdata(sound_device);
	struct mpc8610_hpcd_data *machine_data =
		container_of(card, struct mpc8610_hpcd_data, card);

	platform_device_unregister(sound_device);

	kfree(machine_data);
	sound_device->dev.platform_data = NULL;

	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static struct platform_driver mpc8610_hpcd_driver = {
	.probe = mpc8610_hpcd_probe,
	.remove = __devexit_p(mpc8610_hpcd_remove),
	.driver = {
		/* The name must match 'compatible' property in the device tree,
		 * in lowercase letters.
		 */
		.name = "snd-soc-mpc8610hpcd",
		.owner = THIS_MODULE,
	},
};

/**
 * mpc8610_hpcd_init: machine driver initialization.
 *
 * This function is called when this module is loaded.
 */
static int __init mpc8610_hpcd_init(void)
{
	struct device_node *guts_np;
	struct resource res;

	pr_info("Freescale MPC8610 HPCD ALSA SoC machine driver\n");

	/* Get the physical address of the global utilities registers */
	guts_np = of_find_compatible_node(NULL, NULL, "fsl,mpc8610-guts");
	if (of_address_to_resource(guts_np, 0, &res)) {
		pr_err("mpc8610-hpcd: missing/invalid global utilities node\n");
		return -EINVAL;
	}
	guts_phys = res.start;

	return platform_driver_register(&mpc8610_hpcd_driver);
}

/**
 * mpc8610_hpcd_exit: machine driver exit
 *
 * This function is called when this driver is unloaded.
 */
static void __exit mpc8610_hpcd_exit(void)
{
	platform_driver_unregister(&mpc8610_hpcd_driver);
}

module_init(mpc8610_hpcd_init);
module_exit(mpc8610_hpcd_exit);

MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale MPC8610 HPCD ALSA SoC machine driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
pausa/android_kernel_htc_k2_ul
drivers/net/ethernet/dec/tulip/de2104x.c
4805
54710
/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */ /* Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com> Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c] Written/copyright 1994-2001 by Donald Becker. [tulip.c] This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. See the file COPYING in this distribution for more information. TODO, in rough priority order: * Support forcing media type with a module parameter, like dl2k.c/sundance.c * Constants (module parms?) for Rx work limit * Complete reset on PciErr * Jumbo frames / dev->change_mtu * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error * Implement Tx software interrupt mitigation via Tx descriptor bit */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "de2104x" #define DRV_VERSION "0.7" #define DRV_RELDATE "Mar 17, 2004" #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/compiler.h> #include <linux/rtnetlink.h> #include <linux/crc32.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> #include <asm/unaligned.h> /* These identify the driver base version and may not be removed. 
*/ static char version[] = "PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")"; MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static int debug = -1; module_param (debug, int, 0); MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number"); /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ defined(CONFIG_SPARC) || defined(__ia64__) || \ defined(__sh__) || defined(__mips__) static int rx_copybreak = 1518; #else static int rx_copybreak = 100; #endif module_param (rx_copybreak, int, 0); MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied"); #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) /* Descriptor skip length in 32 bit longwords. */ #ifndef CONFIG_DE2104X_DSL #define DSL 0 #else #define DSL CONFIG_DE2104X_DSL #endif #define DE_RX_RING_SIZE 64 #define DE_TX_RING_SIZE 64 #define DE_RING_BYTES \ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ (sizeof(struct de_desc) * DE_TX_RING_SIZE)) #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1)) #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1)) #define TX_BUFFS_AVAIL(CP) \ (((CP)->tx_tail <= (CP)->tx_head) ? 
\ (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \ (CP)->tx_tail - (CP)->tx_head - 1) #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ #define RX_OFFSET 2 #define DE_SETUP_SKB ((struct sk_buff *) 1) #define DE_DUMMY_SKB ((struct sk_buff *) 2) #define DE_SETUP_FRAME_WORDS 96 #define DE_EEPROM_WORDS 256 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16)) #define DE_MAX_MEDIA 5 #define DE_MEDIA_TP_AUTO 0 #define DE_MEDIA_BNC 1 #define DE_MEDIA_AUI 2 #define DE_MEDIA_TP 3 #define DE_MEDIA_TP_FD 4 #define DE_MEDIA_INVALID DE_MAX_MEDIA #define DE_MEDIA_FIRST 0 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1) #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC) #define DE_TIMER_LINK (60 * HZ) #define DE_TIMER_NO_LINK (5 * HZ) #define DE_NUM_REGS 16 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32)) #define DE_REGS_VER 1 /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (6*HZ) /* This is a mysterious value that can be written to CSR11 in the 21040 (only) to support a pre-NWay full-duplex signaling mechanism using short frames. No one knows what it should be, but if left at its default value some 10base2(!) packets trigger a full-duplex-request interrupt. 
*/ #define FULL_DUPLEX_MAGIC 0x6969 enum { /* NIC registers */ BusMode = 0x00, TxPoll = 0x08, RxPoll = 0x10, RxRingAddr = 0x18, TxRingAddr = 0x20, MacStatus = 0x28, MacMode = 0x30, IntrMask = 0x38, RxMissed = 0x40, ROMCmd = 0x48, CSR11 = 0x58, SIAStatus = 0x60, CSR13 = 0x68, CSR14 = 0x70, CSR15 = 0x78, PCIPM = 0x40, /* BusMode bits */ CmdReset = (1 << 0), CacheAlign16 = 0x00008000, BurstLen4 = 0x00000400, DescSkipLen = (DSL << 2), /* Rx/TxPoll bits */ NormalTxPoll = (1 << 0), NormalRxPoll = (1 << 0), /* Tx/Rx descriptor status bits */ DescOwn = (1 << 31), RxError = (1 << 15), RxErrLong = (1 << 7), RxErrCRC = (1 << 1), RxErrFIFO = (1 << 0), RxErrRunt = (1 << 11), RxErrFrame = (1 << 14), RingEnd = (1 << 25), FirstFrag = (1 << 29), LastFrag = (1 << 30), TxError = (1 << 15), TxFIFOUnder = (1 << 1), TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11), TxMaxCol = (1 << 8), TxOWC = (1 << 9), TxJabber = (1 << 14), SetupFrame = (1 << 27), TxSwInt = (1 << 31), /* MacStatus bits */ IntrOK = (1 << 16), IntrErr = (1 << 15), RxIntr = (1 << 6), RxEmpty = (1 << 7), TxIntr = (1 << 0), TxEmpty = (1 << 2), PciErr = (1 << 13), TxState = (1 << 22) | (1 << 21) | (1 << 20), RxState = (1 << 19) | (1 << 18) | (1 << 17), LinkFail = (1 << 12), LinkPass = (1 << 4), RxStopped = (1 << 8), TxStopped = (1 << 1), /* MacMode bits */ TxEnable = (1 << 13), RxEnable = (1 << 1), RxTx = TxEnable | RxEnable, FullDuplex = (1 << 9), AcceptAllMulticast = (1 << 7), AcceptAllPhys = (1 << 6), BOCnt = (1 << 5), MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) | RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast, /* ROMCmd bits */ EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */ EE_CS = 0x01, /* EEPROM chip select. */ EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */ EE_WRITE_0 = 0x01, EE_WRITE_1 = 0x05, EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */ EE_ENB = (0x4800 | EE_CS), /* The EEPROM commands include the alway-set leading bit. 
*/ EE_READ_CMD = 6, /* RxMissed bits */ RxMissedOver = (1 << 16), RxMissedMask = 0xffff, /* SROM-related bits */ SROMC0InfoLeaf = 27, MediaBlockMask = 0x3f, MediaCustomCSRs = (1 << 6), /* PCIPM bits */ PM_Sleep = (1 << 31), PM_Snooze = (1 << 30), PM_Mask = PM_Sleep | PM_Snooze, /* SIAStatus bits */ NWayState = (1 << 14) | (1 << 13) | (1 << 12), NWayRestart = (1 << 12), NonselPortActive = (1 << 9), SelPortActive = (1 << 8), LinkFailStatus = (1 << 2), NetCxnErr = (1 << 1), }; static const u32 de_intr_mask = IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty | LinkPass | LinkFail | PciErr; /* * Set the programmable burst length to 4 longwords for all: * DMA errors result without these values. Cache align 16 long. */ static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen; struct de_srom_media_block { u8 opts; u16 csr13; u16 csr14; u16 csr15; } __packed; struct de_srom_info_leaf { u16 default_media; u8 n_blocks; u8 unused; } __packed; struct de_desc { __le32 opts1; __le32 opts2; __le32 addr1; __le32 addr2; #if DSL __le32 skip[DSL]; #endif }; struct media_info { u16 type; /* DE_MEDIA_xxx */ u16 csr13; u16 csr14; u16 csr15; }; struct ring_info { struct sk_buff *skb; dma_addr_t mapping; }; struct de_private { unsigned tx_head; unsigned tx_tail; unsigned rx_tail; void __iomem *regs; struct net_device *dev; spinlock_t lock; struct de_desc *rx_ring; struct de_desc *tx_ring; struct ring_info tx_skb[DE_TX_RING_SIZE]; struct ring_info rx_skb[DE_RX_RING_SIZE]; unsigned rx_buf_sz; dma_addr_t ring_dma; u32 msg_enable; struct net_device_stats net_stats; struct pci_dev *pdev; u16 setup_frame[DE_SETUP_FRAME_WORDS]; u32 media_type; u32 media_supported; u32 media_advertise; struct media_info media[DE_MAX_MEDIA]; struct timer_list media_timer; u8 *ee_data; unsigned board_idx; unsigned de21040 : 1; unsigned media_lock : 1; }; static void de_set_rx_mode (struct net_device *dev); static void de_tx (struct de_private *de); static void de_clean_rings (struct de_private 
*de); static void de_media_interrupt (struct de_private *de, u32 status); static void de21040_media_timer (unsigned long data); static void de21041_media_timer (unsigned long data); static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { }, }; MODULE_DEVICE_TABLE(pci, de_pci_tbl); static const char * const media_name[DE_MAX_MEDIA] = { "10baseT auto", "BNC", "AUI", "10baseT-HD", "10baseT-FD" }; /* 21040 transceiver register settings: * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/ static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, }; static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, }; static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, }; /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */ static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; #define dr32(reg) ioread32(de->regs + (reg)) #define dw32(reg, val) iowrite32((val), de->regs + (reg)) static void de_rx_err_acct (struct de_private *de, unsigned rx_tail, u32 status, u32 len) { netif_dbg(de, rx_err, de->dev, "rx err, slot %d status 0x%x len %d\n", rx_tail, status, len); if ((status & 0x38000300) != 0x0300) { /* Ingore earlier buffers. */ if ((status & 0xffff) != 0x7fff) { netif_warn(de, rx_err, de->dev, "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", status); de->net_stats.rx_length_errors++; } } else if (status & RxError) { /* There was a fatal error. 
 */
		de->net_stats.rx_errors++; /* end of a packet.*/
		/* NOTE(review): 0x0890 appears to combine the length-error
		 * status bits of the 21040/21041 RDES0 — confirm against the
		 * chip manual. */
		if (status & 0x0890)
			de->net_stats.rx_length_errors++;
		if (status & RxErrCRC)
			de->net_stats.rx_crc_errors++;
		if (status & RxErrFIFO)
			de->net_stats.rx_fifo_errors++;
	}
}

/*
 * Receive path.  Walks the rx descriptor ring from de->rx_tail, handing
 * completed frames to the stack with netif_rx().  Small frames (up to
 * rx_copybreak) are copied into a fresh skb so the original ring buffer
 * can be reused; larger frames are unmapped and passed up directly, with
 * a newly allocated buffer taking their slot.  Descriptors are returned
 * to the chip (DescOwn) as each slot is processed.
 */
static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;
	unsigned drop = 0;
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);

		/* read descriptor status only after the chip's writes are
		 * visible to us */
		rmb();
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)
			break;	/* still owned by the chip — nothing more to do */

		/* frame length from RDES0; minus 4 strips the trailing CRC */
		len = ((status >> 16) & 0x7ff) - 4;
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			/* a previous slot failed (alloc or netif_rx drop);
			 * discard the rest of this batch */
			de->net_stats.rx_dropped++;
			goto rx_next;
		}

		/* anything other than "single descriptor, no error" goes to
		 * the error accounting path */
		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		netif_dbg(de, rx_status, de->dev,
			  "rx slot %d status 0x%x len %d copying? %d\n",
			  rx_tail, status, len, copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = netdev_alloc_skb(de->dev, buflen);
		if (unlikely(!copy_skb)) {
			de->net_stats.rx_dropped++;
			drop = 1;
			/* drain a bounded number of further slots, then stop */
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			/* pass the original buffer up; the fresh skb
			 * replaces it in the ring */
			pci_unmap_single(de->pdev, mapping, buflen,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			/* copybreak: copy the small frame out, keep the
			 * original buffer mapped in the ring */
			pci_dma_sync_single_for_cpu(de->pdev, mapping, len,
						    PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			pci_dma_sync_single_for_device(de->pdev, mapping, len,
						       PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);

		de->net_stats.rx_packets++;
		de->net_stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		/* re-arm this descriptor: size/RingEnd and buffer address
		 * first, then (after wmb) ownership back to the chip */
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		netdev_warn(de->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}

/*
 * Interrupt handler.  Dispatches rx, tx-completion and link-change work
 * according to the MacStatus bits, acknowledging them by writing the
 * status value back.  Returns IRQ_NONE when the interrupt is not ours
 * (shared IRQ line) or the register reads back as all-ones.
 * Note: de_rx() runs without de->lock; tx and media handling take it.
 */
static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
		  status, dr32(MacMode),
		  de->rx_tail, de->tx_head, de->tx_tail);

	/* ack the interrupt causes we just read */
	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		/* ring ran dry: poke the chip to resume rx polling */
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* read-then-write clears the sticky PCI error bits */
		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		netdev_err(de->dev,
			   "PCI bus error, status=%08x, PCI status=%04x\n",
			   status, pci_status);
	}

	return IRQ_HANDLED;
}

/*
 * Reclaim completed tx descriptors between tx_tail and tx_head.
 * DE_DUMMY_SKB slots (errata padding) are simply skipped; DE_SETUP_SKB
 * slots carry the setup frame and are only unmapped.  Real skbs are
 * unmapped, accounted on their final fragment, and freed.  Wakes the
 * queue once at least a quarter of the ring is free again.
 * Called under de->lock from the interrupt handler.
 */
static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		/* don't look at descriptor contents before ownership check */
		rmb();
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;	/* chip hasn't finished this one yet */

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;

		if (unlikely(skb == DE_SETUP_SKB)) {
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame),
					 PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				netif_dbg(de, tx_err, de->dev,
					  "tx err, status 0x%x\n",
					  status);
				de->net_stats.tx_errors++;
				if (status & TxOWC)
					de->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->net_stats.tx_fifo_errors++;
			} else {
				de->net_stats.tx_packets++;
				de->net_stats.tx_bytes += skb->len;
				netif_dbg(de, tx_done, de->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	if (netif_queue_stopped(de->dev) &&
	    (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}

/*
 * Hard-start-xmit.  Maps the skb, fills the next tx descriptor, and
 * hands it to the chip.  The queue is stopped when the ring is full;
 * TxSwInt is requested on the last free slot and at the half-full mark
 * so completions are signalled before the ring fills.
 */
static netdev_tx_t de_start_xmit (struct sk_buff *skb,
				  struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;

	/* descriptor body must be visible before ownership transfer */
	wmb();

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand.
 */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling de->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

#undef set_bit_le
/* set bit i in a little-endian bit array at p (byte-addressed) */
#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)

/*
 * Build a 21041 setup frame using the 512-bit multicast hash filter:
 * a 32-word hash table (each 16-bit word duplicated in the frame, as
 * the chip expects), followed by our own station address in the final
 * perfect-filter entry.
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		/* low 9 bits of the little-endian CRC index the 512-bit table */
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		set_bit_le(index, hash_table);
	}

	/* each 16-bit word is written twice — setup-frame layout */
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

/*
 * Build a setup frame using the chip's 16-entry perfect filter: one
 * entry per multicast address (each 16-bit word duplicated), unused
 * entries padded with the broadcast address, and our station address
 * in the last slot.  Only valid for <= 14 multicast addresses.
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address.
*/ eaddrs = (u16 *)dev->dev_addr; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; } static void __de_set_rx_mode (struct net_device *dev) { struct de_private *de = netdev_priv(dev); u32 macmode; unsigned int entry; u32 mapping; struct de_desc *txd; struct de_desc *dummy_txd = NULL; macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ macmode |= AcceptAllMulticast | AcceptAllPhys; goto out; } if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter well -- accept all multicasts. */ macmode |= AcceptAllMulticast; goto out; } /* Note that only the low-address shortword of setup_frame is valid! The values are doubled for big-endian architectures. */ if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */ build_setup_frame_hash (de->setup_frame, dev); else build_setup_frame_perfect (de->setup_frame, dev); /* * Now add this frame to the Tx list. */ entry = de->tx_head; /* Avoid a chip errata by prefixing a dummy entry. */ if (entry != 0) { de->tx_skb[entry].skb = DE_DUMMY_SKB; dummy_txd = &de->tx_ring[entry]; dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ? cpu_to_le32(RingEnd) : 0; dummy_txd->addr1 = 0; /* Must set DescOwned later to avoid race with chip */ entry = NEXT_TX(entry); } de->tx_skb[entry].skb = DE_SETUP_SKB; de->tx_skb[entry].mapping = mapping = pci_map_single (de->pdev, de->setup_frame, sizeof (de->setup_frame), PCI_DMA_TODEVICE); /* Put the setup frame on the Tx list. 
 */
	txd = &de->tx_ring[entry];

	/* SetupFrame descriptor: opts2 carries the setup flag, the frame
	 * length, and RingEnd on the last slot */
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd |
					 sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame |
					 sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);

	/* publish descriptor contents before giving it to the chip */
	wmb();

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	/* hand the preceding dummy descriptor over only after the setup
	 * descriptor is complete (see errata note at its creation) */
	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}

/* Locked wrapper around __de_set_rx_mode(). */
static void de_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct de_private *de = netdev_priv(dev);

	spin_lock_irqsave (&de->lock, flags);
	__de_set_rx_mode(dev);
	spin_unlock_irqrestore (&de->lock, flags);
}

/*
 * Fold a raw RxMissed register value into the missed-frame statistic.
 * If the hardware counter overflowed, credit the full counter range.
 */
static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
	if (unlikely(rx_missed & RxMissedOver))
		de->net_stats.rx_missed_errors += RxMissedMask;
	else
		de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
}

/* Harvest the self-clearing RxMissed counter into net_stats. */
static void __de_get_stats(struct de_private *de)
{
	u32 tmp = dr32(RxMissed); /* self-clearing */

	de_rx_missed(de, tmp);
}

/*
 * ndo_get_stats.  All other counters are maintained in software; only
 * the chip's missed-frame counter needs to be read (and only while the
 * device is up and present).
 */
static struct net_device_stats *de_get_stats(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&de->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__de_get_stats(de);
	spin_unlock_irq(&de->lock);

	return &de->net_stats;
}

/* Non-zero while either DMA engine (rx or tx) is active. */
static inline int de_is_running (struct de_private *de)
{
	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
}

/*
 * Stop the rx/tx DMA engines and wait (up to ~1.3ms, polling every
 * 100us) for any in-flight frame to drain.
 */
static void de_stop_rxtx (struct de_private *de)
{
	u32 macmode;
	unsigned int i = 1300/100;	/* poll count: 1.3ms / 100us */

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);
		dr32(MacMode);	/* flush the write */
	}

	/* wait until in-flight frame completes.
	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
	 * Typically expect this loop to end in < 50 us on 100BT.
*/ while (--i) { if (!de_is_running(de)) return; udelay(100); } netdev_warn(de->dev, "timeout expired, stopping DMA\n"); } static inline void de_start_rxtx (struct de_private *de) { u32 macmode; macmode = dr32(MacMode); if ((macmode & RxTx) != RxTx) { dw32(MacMode, macmode | RxTx); dr32(MacMode); } } static void de_stop_hw (struct de_private *de) { udelay(5); dw32(IntrMask, 0); de_stop_rxtx(de); dw32(MacStatus, dr32(MacStatus)); udelay(10); de->rx_tail = 0; de->tx_head = de->tx_tail = 0; } static void de_link_up(struct de_private *de) { if (!netif_carrier_ok(de->dev)) { netif_carrier_on(de->dev); netif_info(de, link, de->dev, "link up, media %s\n", media_name[de->media_type]); } } static void de_link_down(struct de_private *de) { if (netif_carrier_ok(de->dev)) { netif_carrier_off(de->dev); netif_info(de, link, de->dev, "link down\n"); } } static void de_set_media (struct de_private *de) { unsigned media = de->media_type; u32 macmode = dr32(MacMode); if (de_is_running(de)) netdev_warn(de->dev, "chip is running while changing media!\n"); if (de->de21040) dw32(CSR11, FULL_DUPLEX_MAGIC); dw32(CSR13, 0); /* Reset phy */ dw32(CSR14, de->media[media].csr14); dw32(CSR15, de->media[media].csr15); dw32(CSR13, de->media[media].csr13); /* must delay 10ms before writing to other registers, * especially CSR6 */ mdelay(10); if (media == DE_MEDIA_TP_FD) macmode |= FullDuplex; else macmode &= ~FullDuplex; netif_info(de, link, de->dev, "set link %s\n", media_name[media]); netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n", dr32(MacMode), dr32(SIAStatus), dr32(CSR13), dr32(CSR14), dr32(CSR15)); netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", macmode, de->media[media].csr13, de->media[media].csr14, de->media[media].csr15); if (macmode != dr32(MacMode)) dw32(MacMode, macmode); } static void de_next_media (struct de_private *de, const u32 *media, unsigned int n_media) { unsigned int i; for (i = 0; i < n_media; i++) { if (de_ok_to_advertise(de, 
media[i])) { de->media_type = media[i]; return; } } } static void de21040_media_timer (unsigned long data) { struct de_private *de = (struct de_private *) data; struct net_device *dev = de->dev; u32 status = dr32(SIAStatus); unsigned int carrier; unsigned long flags; carrier = (status & NetCxnErr) ? 0 : 1; if (carrier) { if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus)) goto no_link_yet; de->media_timer.expires = jiffies + DE_TIMER_LINK; add_timer(&de->media_timer); if (!netif_carrier_ok(dev)) de_link_up(de); else netif_info(de, timer, dev, "%s link ok, status %x\n", media_name[de->media_type], status); return; } de_link_down(de); if (de->media_lock) return; if (de->media_type == DE_MEDIA_AUI) { static const u32 next_state = DE_MEDIA_TP; de_next_media(de, &next_state, 1); } else { static const u32 next_state = DE_MEDIA_AUI; de_next_media(de, &next_state, 1); } spin_lock_irqsave(&de->lock, flags); de_stop_rxtx(de); spin_unlock_irqrestore(&de->lock, flags); de_set_media(de); de_start_rxtx(de); no_link_yet: de->media_timer.expires = jiffies + DE_TIMER_NO_LINK; add_timer(&de->media_timer); netif_info(de, timer, dev, "no link, trying media %s, status %x\n", media_name[de->media_type], status); } static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) { switch (new_media) { case DE_MEDIA_TP_AUTO: if (!(de->media_advertise & ADVERTISED_Autoneg)) return 0; if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full))) return 0; break; case DE_MEDIA_BNC: if (!(de->media_advertise & ADVERTISED_BNC)) return 0; break; case DE_MEDIA_AUI: if (!(de->media_advertise & ADVERTISED_AUI)) return 0; break; case DE_MEDIA_TP: if (!(de->media_advertise & ADVERTISED_10baseT_Half)) return 0; break; case DE_MEDIA_TP_FD: if (!(de->media_advertise & ADVERTISED_10baseT_Full)) return 0; break; } return 1; } static void de21041_media_timer (unsigned long data) { struct de_private *de = (struct de_private *) data; struct net_device *dev = 
de->dev; u32 status = dr32(SIAStatus); unsigned int carrier; unsigned long flags; /* clear port active bits */ dw32(SIAStatus, NonselPortActive | SelPortActive); carrier = (status & NetCxnErr) ? 0 : 1; if (carrier) { if ((de->media_type == DE_MEDIA_TP_AUTO || de->media_type == DE_MEDIA_TP || de->media_type == DE_MEDIA_TP_FD) && (status & LinkFailStatus)) goto no_link_yet; de->media_timer.expires = jiffies + DE_TIMER_LINK; add_timer(&de->media_timer); if (!netif_carrier_ok(dev)) de_link_up(de); else netif_info(de, timer, dev, "%s link ok, mode %x status %x\n", media_name[de->media_type], dr32(MacMode), status); return; } de_link_down(de); /* if media type locked, don't switch media */ if (de->media_lock) goto set_media; /* if activity detected, use that as hint for new media type */ if (status & NonselPortActive) { unsigned int have_media = 1; /* if AUI/BNC selected, then activity is on TP port */ if (de->media_type == DE_MEDIA_AUI || de->media_type == DE_MEDIA_BNC) { if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)) de->media_type = DE_MEDIA_TP_AUTO; else have_media = 0; } /* TP selected. If there is only TP and BNC, then it's BNC */ else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) && de_ok_to_advertise(de, DE_MEDIA_BNC)) de->media_type = DE_MEDIA_BNC; /* TP selected. If there is only TP and AUI, then it's AUI */ else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) && de_ok_to_advertise(de, DE_MEDIA_AUI)) de->media_type = DE_MEDIA_AUI; /* otherwise, ignore the hint */ else have_media = 0; if (have_media) goto set_media; } /* * Absent or ambiguous activity hint, move to next advertised * media state. If de->media_type is left unchanged, this * simply resets the PHY and reloads the current media settings. 
 */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_states[] = {
			DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		static const u32 next_states[] = {
			DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		static const u32 next_states[] = {
			DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	/* restart the SIA/MAC on the (possibly new) media type */
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}

/*
 * Handle LinkPass/LinkFail interrupt status (called under de->lock from
 * de_interrupt).  A LinkPass on TP switches us to TP autonegotiation
 * when allowed; LinkFail marks carrier down only for TP media, since
 * AUI/BNC have no link-integrity signal.
 */
static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		/* Ignore if current media is AUI or BNC and we can't use TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC) &&
		    (de->media_lock ||
		     !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
			return;
		/* If current media is not TP, change it to TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC)) {
			de->media_type = DE_MEDIA_TP_AUTO;
			de_stop_rxtx(de);
			de_set_media(de);
			de_start_rxtx(de);
		}
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	BUG_ON(!(status & LinkFail));
	/* Mark the link as down only if current media is TP */
	if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
	    de->media_type != DE_MEDIA_BNC) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}

/*
 * Software-reset the MAC via the BusMode register.  Returns 0 on
 * success, -EBUSY if the chip won't quiesce, -ENODEV if the registers
 * read back all-ones (card gone).
 */
static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/*
	 * Reset MAC.  de4x5.c and tulip.c examined for "advice"
	 * in this area.
	 */

	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles.
 */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	/* extra dummy reads/delays to let the reset settle */
	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}

/*
 * Bring a 21041 out of any PCI power-management sleep state.  The
 * 21040 has no PM support, so it is left untouched.
 */
static void de_adapter_wake (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	if (pmctl & PM_Mask) {
		pmctl &= ~PM_Mask;
		pci_write_config_dword(de->pdev, PCIPM, pmctl);

		/* de4x5.c delays, so we do too */
		msleep(10);
	}
}

/* Put a 21041 into PCI power-management sleep (no-op on 21040). */
static void de_adapter_sleep (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	dw32(CSR13, 0); /* Reset phy */
	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	pmctl |= PM_Sleep;
	pci_write_config_dword(de->pdev, PCIPM, pmctl);
}

/*
 * Full hardware bring-up: wake, reset, program the media/SIA, point
 * the chip at the descriptor rings, enable rx/tx, unmask interrupts
 * and load the rx filter.  Returns 0 or a negative errno from
 * de_reset_mac().
 */
static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	/* tx ring follows the rx ring in the single DMA allocation */
	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma +
			 (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}

/*
 * Allocate and map an skb for every rx descriptor and hand the whole
 * ring to the chip.  On allocation failure the ring is cleaned and
 * -ENOMEM returned.
 */
static int de_refill_rx (struct de_private *de)
{
	unsigned i;

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
		if (!skb)
			goto err_out;

		de->rx_skb[i].mapping = pci_map_single(de->pdev,
			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
		de->rx_skb[i].skb = skb;

		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
		if (i == (DE_RX_RING_SIZE - 1))
			de->rx_ring[i].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
		de->rx_ring[i].addr2 = 0;
	}

	return 0;

err_out:
	de_clean_rings(de);
	return -ENOMEM;
}

/* Reset ring indices, zero the tx ring, and refill the rx ring. */
static int de_init_rings (struct de_private *de)
{
memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE); de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); de->rx_tail = 0; de->tx_head = de->tx_tail = 0; return de_refill_rx (de); } static int de_alloc_rings (struct de_private *de) { de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma); if (!de->rx_ring) return -ENOMEM; de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE]; return de_init_rings(de); } static void de_clean_rings (struct de_private *de) { unsigned i; memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE); de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); wmb(); memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE); de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); wmb(); for (i = 0; i < DE_RX_RING_SIZE; i++) { if (de->rx_skb[i].skb) { pci_unmap_single(de->pdev, de->rx_skb[i].mapping, de->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(de->rx_skb[i].skb); } } for (i = 0; i < DE_TX_RING_SIZE; i++) { struct sk_buff *skb = de->tx_skb[i].skb; if ((skb) && (skb != DE_DUMMY_SKB)) { if (skb != DE_SETUP_SKB) { de->net_stats.tx_dropped++; pci_unmap_single(de->pdev, de->tx_skb[i].mapping, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); } else { pci_unmap_single(de->pdev, de->tx_skb[i].mapping, sizeof(de->setup_frame), PCI_DMA_TODEVICE); } } } memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE); memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE); } static void de_free_rings (struct de_private *de) { de_clean_rings(de); pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma); de->rx_ring = NULL; de->tx_ring = NULL; } static int de_open (struct net_device *dev) { struct de_private *de = netdev_priv(dev); int rc; netif_dbg(de, ifup, dev, "enabling interface\n"); de->rx_buf_sz = (dev->mtu <= 1500 ? 
PKT_BUF_SZ : dev->mtu + 32); rc = de_alloc_rings(de); if (rc) { netdev_err(dev, "ring allocation failure, err=%d\n", rc); return rc; } dw32(IntrMask, 0); rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); if (rc) { netdev_err(dev, "IRQ %d request failure, err=%d\n", dev->irq, rc); goto err_out_free; } rc = de_init_hw(de); if (rc) { netdev_err(dev, "h/w init failure, err=%d\n", rc); goto err_out_free_irq; } netif_start_queue(dev); mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); return 0; err_out_free_irq: free_irq(dev->irq, dev); err_out_free: de_free_rings(de); return rc; } static int de_close (struct net_device *dev) { struct de_private *de = netdev_priv(dev); unsigned long flags; netif_dbg(de, ifdown, dev, "disabling interface\n"); del_timer_sync(&de->media_timer); spin_lock_irqsave(&de->lock, flags); de_stop_hw(de); netif_stop_queue(dev); netif_carrier_off(dev); spin_unlock_irqrestore(&de->lock, flags); free_irq(dev->irq, dev); de_free_rings(de); de_adapter_sleep(de); return 0; } static void de_tx_timeout (struct net_device *dev) { struct de_private *de = netdev_priv(dev); netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), de->rx_tail, de->tx_head, de->tx_tail); del_timer_sync(&de->media_timer); disable_irq(dev->irq); spin_lock_irq(&de->lock); de_stop_hw(de); netif_stop_queue(dev); netif_carrier_off(dev); spin_unlock_irq(&de->lock); enable_irq(dev->irq); /* Update the error counts. 
*/ __de_get_stats(de); synchronize_irq(dev->irq); de_clean_rings(de); de_init_rings(de); de_init_hw(de); netif_wake_queue(dev); } static void __de_get_regs(struct de_private *de, u8 *buf) { int i; u32 *rbuf = (u32 *)buf; /* read all CSRs */ for (i = 0; i < DE_NUM_REGS; i++) rbuf[i] = dr32(i * 8); /* handle self-clearing RxMissed counter, CSR8 */ de_rx_missed(de, rbuf[8]); } static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) { ecmd->supported = de->media_supported; ecmd->transceiver = XCVR_INTERNAL; ecmd->phy_address = 0; ecmd->advertising = de->media_advertise; switch (de->media_type) { case DE_MEDIA_AUI: ecmd->port = PORT_AUI; break; case DE_MEDIA_BNC: ecmd->port = PORT_BNC; break; default: ecmd->port = PORT_TP; break; } ethtool_cmd_speed_set(ecmd, 10); if (dr32(MacMode) & FullDuplex) ecmd->duplex = DUPLEX_FULL; else ecmd->duplex = DUPLEX_HALF; if (de->media_lock) ecmd->autoneg = AUTONEG_DISABLE; else ecmd->autoneg = AUTONEG_ENABLE; /* ignore maxtxpkt, maxrxpkt for now */ return 0; } static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) { u32 new_media; unsigned int media_lock; if (ethtool_cmd_speed(ecmd) != 10) return -EINVAL; if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) return -EINVAL; if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC) return -EINVAL; if (de->de21040 && ecmd->port == PORT_BNC) return -EINVAL; if (ecmd->transceiver != XCVR_INTERNAL) return -EINVAL; if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) return -EINVAL; if (ecmd->advertising & ~de->media_supported) return -EINVAL; if (ecmd->autoneg == AUTONEG_ENABLE && (!(ecmd->advertising & ADVERTISED_Autoneg))) return -EINVAL; switch (ecmd->port) { case PORT_AUI: new_media = DE_MEDIA_AUI; if (!(ecmd->advertising & ADVERTISED_AUI)) return -EINVAL; break; case PORT_BNC: new_media = DE_MEDIA_BNC; if (!(ecmd->advertising & ADVERTISED_BNC)) return -EINVAL; break; default: if 
(ecmd->autoneg == AUTONEG_ENABLE) new_media = DE_MEDIA_TP_AUTO; else if (ecmd->duplex == DUPLEX_FULL) new_media = DE_MEDIA_TP_FD; else new_media = DE_MEDIA_TP; if (!(ecmd->advertising & ADVERTISED_TP)) return -EINVAL; if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half))) return -EINVAL; break; } media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1; if ((new_media == de->media_type) && (media_lock == de->media_lock) && (ecmd->advertising == de->media_advertise)) return 0; /* nothing to change */ de_link_down(de); mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); de_stop_rxtx(de); de->media_type = new_media; de->media_lock = media_lock; de->media_advertise = ecmd->advertising; de_set_media(de); if (netif_running(de->dev)) de_start_rxtx(de); return 0; } static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info) { struct de_private *de = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info)); info->eedump_len = DE_EEPROM_SIZE; } static int de_get_regs_len(struct net_device *dev) { return DE_REGS_SIZE; } static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); rc = __de_get_settings(de, ecmd); spin_unlock_irq(&de->lock); return rc; } static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct de_private *de = netdev_priv(dev); int rc; spin_lock_irq(&de->lock); rc = __de_set_settings(de, ecmd); spin_unlock_irq(&de->lock); return rc; } static u32 de_get_msglevel(struct net_device *dev) { struct de_private *de = netdev_priv(dev); return de->msg_enable; } static void de_set_msglevel(struct net_device *dev, u32 msglvl) { struct de_private *de = netdev_priv(dev); de->msg_enable = msglvl; } static int de_get_eeprom(struct net_device *dev, struct 
ethtool_eeprom *eeprom, u8 *data) { struct de_private *de = netdev_priv(dev); if (!de->ee_data) return -EOPNOTSUPP; if ((eeprom->offset != 0) || (eeprom->magic != 0) || (eeprom->len != DE_EEPROM_SIZE)) return -EINVAL; memcpy(data, de->ee_data, eeprom->len); return 0; } static int de_nway_reset(struct net_device *dev) { struct de_private *de = netdev_priv(dev); u32 status; if (de->media_type != DE_MEDIA_TP_AUTO) return -EINVAL; if (netif_carrier_ok(de->dev)) de_link_down(de); status = dr32(SIAStatus); dw32(SIAStatus, (status & ~NWayState) | NWayRestart); netif_info(de, link, dev, "link nway restart, status %x,%x\n", status, dr32(SIAStatus)); return 0; } static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *data) { struct de_private *de = netdev_priv(dev); regs->version = (DE_REGS_VER << 2) | de->de21040; spin_lock_irq(&de->lock); __de_get_regs(de, data); spin_unlock_irq(&de->lock); } static const struct ethtool_ops de_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = de_get_drvinfo, .get_regs_len = de_get_regs_len, .get_settings = de_get_settings, .set_settings = de_set_settings, .get_msglevel = de_get_msglevel, .set_msglevel = de_set_msglevel, .get_eeprom = de_get_eeprom, .nway_reset = de_nway_reset, .get_regs = de_get_regs, }; static void __devinit de21040_get_mac_address (struct de_private *de) { unsigned i; dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. 
*/ udelay(5); for (i = 0; i < 6; i++) { int value, boguscnt = 100000; do { value = dr32(ROMCmd); rmb(); } while (value < 0 && --boguscnt > 0); de->dev->dev_addr[i] = value; udelay(1); if (boguscnt <= 0) pr_warn("timeout reading 21040 MAC address byte %u\n", i); } } static void __devinit de21040_get_media_info(struct de_private *de) { unsigned int i; de->media_type = DE_MEDIA_TP; de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half | SUPPORTED_AUI; de->media_advertise = de->media_supported; for (i = 0; i < DE_MAX_MEDIA; i++) { switch (i) { case DE_MEDIA_AUI: case DE_MEDIA_TP: case DE_MEDIA_TP_FD: de->media[i].type = i; de->media[i].csr13 = t21040_csr13[i]; de->media[i].csr14 = t21040_csr14[i]; de->media[i].csr15 = t21040_csr15[i]; break; default: de->media[i].type = DE_MEDIA_INVALID; break; } } } /* Note: this routine returns extra data bits for size detection. */ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len) { int i; unsigned retval = 0; void __iomem *ee_addr = regs + ROMCmd; int read_cmd = location | (EE_READ_CMD << addr_len); writel(EE_ENB & ~EE_CS, ee_addr); writel(EE_ENB, ee_addr); /* Shift the read command bits out. */ for (i = 4 + addr_len; i >= 0; i--) { short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; writel(EE_ENB | dataval, ee_addr); readl(ee_addr); writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); readl(ee_addr); retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0); } writel(EE_ENB, ee_addr); readl(ee_addr); for (i = 16; i > 0; i--) { writel(EE_ENB | EE_SHIFT_CLK, ee_addr); readl(ee_addr); retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0); writel(EE_ENB, ee_addr); readl(ee_addr); } /* Terminate the EEPROM access. 
*/ writel(EE_ENB & ~EE_CS, ee_addr); return retval; } static void __devinit de21041_get_srom_info (struct de_private *de) { unsigned i, sa_offset = 0, ofs; u8 ee_data[DE_EEPROM_SIZE + 6] = {}; unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6; struct de_srom_info_leaf *il; void *bufp; /* download entire eeprom */ for (i = 0; i < DE_EEPROM_WORDS; i++) ((__le16 *)ee_data)[i] = cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size)); /* DEC now has a specification but early board makers just put the address in the first EEPROM locations. */ /* This does memcmp(eedata, eedata+16, 8) */ #ifndef CONFIG_MIPS_COBALT for (i = 0; i < 8; i ++) if (ee_data[i] != ee_data[16+i]) sa_offset = 20; #endif /* store MAC address */ for (i = 0; i < 6; i ++) de->dev->dev_addr[i] = ee_data[i + sa_offset]; /* get offset of controller 0 info leaf. ignore 2nd byte. */ ofs = ee_data[SROMC0InfoLeaf]; if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block))) goto bad_srom; /* get pointer to info leaf */ il = (struct de_srom_info_leaf *) &ee_data[ofs]; /* paranoia checks */ if (il->n_blocks == 0) goto bad_srom; if ((sizeof(ee_data) - ofs) < (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks))) goto bad_srom; /* get default media type */ switch (get_unaligned(&il->default_media)) { case 0x0001: de->media_type = DE_MEDIA_BNC; break; case 0x0002: de->media_type = DE_MEDIA_AUI; break; case 0x0204: de->media_type = DE_MEDIA_TP_FD; break; default: de->media_type = DE_MEDIA_TP_AUTO; break; } if (netif_msg_probe(de)) pr_info("de%d: SROM leaf offset %u, default media %s\n", de->board_idx, ofs, media_name[de->media_type]); /* init SIA register values to defaults */ for (i = 0; i < DE_MAX_MEDIA; i++) { de->media[i].type = DE_MEDIA_INVALID; de->media[i].csr13 = 0xffff; de->media[i].csr14 = 0xffff; de->media[i].csr15 = 0xffff; } /* parse media blocks to see what medias are supported, * and if any 
custom CSR values are provided */ bufp = ((void *)il) + sizeof(*il); for (i = 0; i < il->n_blocks; i++) { struct de_srom_media_block *ib = bufp; unsigned idx; /* index based on media type in media block */ switch(ib->opts & MediaBlockMask) { case 0: /* 10baseT */ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg; idx = DE_MEDIA_TP; de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO; break; case 1: /* BNC */ de->media_supported |= SUPPORTED_BNC; idx = DE_MEDIA_BNC; break; case 2: /* AUI */ de->media_supported |= SUPPORTED_AUI; idx = DE_MEDIA_AUI; break; case 4: /* 10baseT-FD */ de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full | SUPPORTED_Autoneg; idx = DE_MEDIA_TP_FD; de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO; break; default: goto bad_srom; } de->media[idx].type = idx; if (netif_msg_probe(de)) pr_info("de%d: media block #%u: %s", de->board_idx, i, media_name[de->media[idx].type]); bufp += sizeof (ib->opts); if (ib->opts & MediaCustomCSRs) { de->media[idx].csr13 = get_unaligned(&ib->csr13); de->media[idx].csr14 = get_unaligned(&ib->csr14); de->media[idx].csr15 = get_unaligned(&ib->csr15); bufp += sizeof(ib->csr13) + sizeof(ib->csr14) + sizeof(ib->csr15); if (netif_msg_probe(de)) pr_cont(" (%x,%x,%x)\n", de->media[idx].csr13, de->media[idx].csr14, de->media[idx].csr15); } else { if (netif_msg_probe(de)) pr_cont("\n"); } if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3])) break; } de->media_advertise = de->media_supported; fill_defaults: /* fill in defaults, for cases where custom CSRs not used */ for (i = 0; i < DE_MAX_MEDIA; i++) { if (de->media[i].csr13 == 0xffff) de->media[i].csr13 = t21041_csr13[i]; if (de->media[i].csr14 == 0xffff) { /* autonegotiation is broken at least on some chip revisions - rev. 
0x21 works, 0x11 does not */ if (de->pdev->revision < 0x20) de->media[i].csr14 = t21041_csr14_brk[i]; else de->media[i].csr14 = t21041_csr14[i]; } if (de->media[i].csr15 == 0xffff) de->media[i].csr15 = t21041_csr15[i]; } de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL); return; bad_srom: /* for error cases, it's ok to assume we support all these */ for (i = 0; i < DE_MAX_MEDIA; i++) de->media[i].type = i; de->media_supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_AUI | SUPPORTED_BNC; goto fill_defaults; } static const struct net_device_ops de_netdev_ops = { .ndo_open = de_open, .ndo_stop = de_close, .ndo_set_rx_mode = de_set_rx_mode, .ndo_start_xmit = de_start_xmit, .ndo_get_stats = de_get_stats, .ndo_tx_timeout = de_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __devinit de_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct de_private *de; int rc; void __iomem *regs; unsigned long pciaddr; static int board_idx = -1; board_idx++; #ifndef MODULE if (board_idx == 0) pr_info("%s\n", version); #endif /* allocate a new ethernet device structure, and fill in defaults */ dev = alloc_etherdev(sizeof(struct de_private)); if (!dev) return -ENOMEM; dev->netdev_ops = &de_netdev_ops; SET_NETDEV_DEV(dev, &pdev->dev); dev->ethtool_ops = &de_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; de = netdev_priv(dev); de->de21040 = ent->driver_data == 0 ? 1 : 0; de->pdev = pdev; de->dev = dev; de->msg_enable = (debug < 0 ? 
DE_DEF_MSG_ENABLE : debug); de->board_idx = board_idx; spin_lock_init (&de->lock); init_timer(&de->media_timer); if (de->de21040) de->media_timer.function = de21040_media_timer; else de->media_timer.function = de21041_media_timer; de->media_timer.data = (unsigned long) de; netif_carrier_off(dev); /* wake up device, assign resources */ rc = pci_enable_device(pdev); if (rc) goto err_out_free; /* reserve PCI resources to ensure driver atomicity */ rc = pci_request_regions(pdev, DRV_NAME); if (rc) goto err_out_disable; /* check for invalid IRQ value */ if (pdev->irq < 2) { rc = -EIO; pr_err("invalid irq (%d) for pci dev %s\n", pdev->irq, pci_name(pdev)); goto err_out_res; } dev->irq = pdev->irq; /* obtain and check validity of PCI I/O address */ pciaddr = pci_resource_start(pdev, 1); if (!pciaddr) { rc = -EIO; pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev)); goto err_out_res; } if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) { rc = -EIO; pr_err("MMIO resource (%llx) too small on pci dev %s\n", (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev)); goto err_out_res; } /* remap CSR registers */ regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); if (!regs) { rc = -EIO; pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", (unsigned long long)pci_resource_len(pdev, 1), pciaddr, pci_name(pdev)); goto err_out_res; } dev->base_addr = (unsigned long) regs; de->regs = regs; de_adapter_wake(de); /* make sure hardware is not running */ rc = de_reset_mac(de); if (rc) { pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev)); goto err_out_iomap; } /* get MAC address, initialize default media type and * get list of supported media */ if (de->de21040) { de21040_get_mac_address(de); de21040_get_media_info(de); } else { de21041_get_srom_info(de); } /* register new network interface with kernel */ rc = register_netdev(dev); if (rc) goto err_out_iomap; /* print info about board and interface just registered */ netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", de->de21040 
? "21040" : "21041", dev->base_addr, dev->dev_addr, dev->irq); pci_set_drvdata(pdev, dev); /* enable busmastering */ pci_set_master(pdev); /* put adapter to sleep */ de_adapter_sleep(de); return 0; err_out_iomap: kfree(de->ee_data); iounmap(regs); err_out_res: pci_release_regions(pdev); err_out_disable: pci_disable_device(pdev); err_out_free: free_netdev(dev); return rc; } static void __devexit de_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct de_private *de = netdev_priv(dev); BUG_ON(!dev); unregister_netdev(dev); kfree(de->ee_data); iounmap(de->regs); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); free_netdev(dev); } #ifdef CONFIG_PM static int de_suspend (struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata (pdev); struct de_private *de = netdev_priv(dev); rtnl_lock(); if (netif_running (dev)) { del_timer_sync(&de->media_timer); disable_irq(dev->irq); spin_lock_irq(&de->lock); de_stop_hw(de); netif_stop_queue(dev); netif_device_detach(dev); netif_carrier_off(dev); spin_unlock_irq(&de->lock); enable_irq(dev->irq); /* Update the error counts. 
*/ __de_get_stats(de); synchronize_irq(dev->irq); de_clean_rings(de); de_adapter_sleep(de); pci_disable_device(pdev); } else { netif_device_detach(dev); } rtnl_unlock(); return 0; } static int de_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); struct de_private *de = netdev_priv(dev); int retval = 0; rtnl_lock(); if (netif_device_present(dev)) goto out; if (!netif_running(dev)) goto out_attach; if ((retval = pci_enable_device(pdev))) { netdev_err(dev, "pci_enable_device failed in resume\n"); goto out; } pci_set_master(pdev); de_init_rings(de); de_init_hw(de); out_attach: netif_device_attach(dev); out: rtnl_unlock(); return 0; } #endif /* CONFIG_PM */ static struct pci_driver de_driver = { .name = DRV_NAME, .id_table = de_pci_tbl, .probe = de_init_one, .remove = __devexit_p(de_remove_one), #ifdef CONFIG_PM .suspend = de_suspend, .resume = de_resume, #endif }; static int __init de_init (void) { #ifdef MODULE pr_info("%s\n", version); #endif return pci_register_driver(&de_driver); } static void __exit de_exit (void) { pci_unregister_driver (&de_driver); } module_init(de_init); module_exit(de_exit);
gpl-2.0
AOSPA-legacy/android_kernel_lge_g3
tools/perf/util/trace-event-read.c
4805
9873
/* * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License (not later!) * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define _FILE_OFFSET_BITS 64 #include <dirent.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <stdarg.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/wait.h> #include <sys/mman.h> #include <pthread.h> #include <fcntl.h> #include <unistd.h> #include <errno.h> #include "../perf.h" #include "util.h" #include "trace-event.h" static int input_fd; static int read_page; int file_bigendian; int host_bigendian; static int long_size; static unsigned long page_size; static ssize_t calc_data_size; static bool repipe; static int do_read(int fd, void *buf, int size) { int rsize = size; while (size) { int ret = read(fd, buf, size); if (ret <= 0) return -1; if (repipe) { int retw = write(STDOUT_FILENO, buf, ret); if (retw <= 0 || retw != ret) die("repiping input file"); } size -= ret; buf += ret; } return rsize; } static int read_or_die(void *data, int size) { int r; r = do_read(input_fd, data, size); if (r <= 0) die("reading input file (size expected=%d received=%d)", size, r); if (calc_data_size) calc_data_size += r; return r; } /* If it fails, the next 
read will report it */ static void skip(int size) { char buf[BUFSIZ]; int r; while (size) { r = size > BUFSIZ ? BUFSIZ : size; read_or_die(buf, r); size -= r; }; } static unsigned int read4(void) { unsigned int data; read_or_die(&data, 4); return __data2host4(data); } static unsigned long long read8(void) { unsigned long long data; read_or_die(&data, 8); return __data2host8(data); } static char *read_string(void) { char buf[BUFSIZ]; char *str = NULL; int size = 0; off_t r; char c; for (;;) { r = read(input_fd, &c, 1); if (r < 0) die("reading input file"); if (!r) die("no data"); if (repipe) { int retw = write(STDOUT_FILENO, &c, 1); if (retw <= 0 || retw != r) die("repiping input file string"); } buf[size++] = c; if (!c) break; } if (calc_data_size) calc_data_size += size; str = malloc_or_die(size); memcpy(str, buf, size); return str; } static void read_proc_kallsyms(void) { unsigned int size; char *buf; size = read4(); if (!size) return; buf = malloc_or_die(size + 1); read_or_die(buf, size); buf[size] = '\0'; parse_proc_kallsyms(buf, size); free(buf); } static void read_ftrace_printk(void) { unsigned int size; char *buf; size = read4(); if (!size) return; buf = malloc_or_die(size); read_or_die(buf, size); parse_ftrace_printk(buf, size); free(buf); } static void read_header_files(void) { unsigned long long size; char *header_event; char buf[BUFSIZ]; read_or_die(buf, 12); if (memcmp(buf, "header_page", 12) != 0) die("did not read header page"); size = read8(); skip(size); /* * The size field in the page is of type long, * use that instead, since it represents the kernel. 
*/ long_size = header_page_size_size; read_or_die(buf, 13); if (memcmp(buf, "header_event", 13) != 0) die("did not read header event"); size = read8(); header_event = malloc_or_die(size); read_or_die(header_event, size); free(header_event); } static void read_ftrace_file(unsigned long long size) { char *buf; buf = malloc_or_die(size); read_or_die(buf, size); parse_ftrace_file(buf, size); free(buf); } static void read_event_file(char *sys, unsigned long long size) { char *buf; buf = malloc_or_die(size); read_or_die(buf, size); parse_event_file(buf, size, sys); free(buf); } static void read_ftrace_files(void) { unsigned long long size; int count; int i; count = read4(); for (i = 0; i < count; i++) { size = read8(); read_ftrace_file(size); } } static void read_event_files(void) { unsigned long long size; char *sys; int systems; int count; int i,x; systems = read4(); for (i = 0; i < systems; i++) { sys = read_string(); count = read4(); for (x=0; x < count; x++) { size = read8(); read_event_file(sys, size); } } } struct cpu_data { unsigned long long offset; unsigned long long size; unsigned long long timestamp; struct record *next; char *page; int cpu; int index; int page_size; }; static struct cpu_data *cpu_data; static void update_cpu_data_index(int cpu) { cpu_data[cpu].offset += page_size; cpu_data[cpu].size -= page_size; cpu_data[cpu].index = 0; } static void get_next_page(int cpu) { off_t save_seek; off_t ret; if (!cpu_data[cpu].page) return; if (read_page) { if (cpu_data[cpu].size <= page_size) { free(cpu_data[cpu].page); cpu_data[cpu].page = NULL; return; } update_cpu_data_index(cpu); /* other parts of the code may expect the pointer to not move */ save_seek = lseek(input_fd, 0, SEEK_CUR); ret = lseek(input_fd, cpu_data[cpu].offset, SEEK_SET); if (ret == (off_t)-1) die("failed to lseek"); ret = read(input_fd, cpu_data[cpu].page, page_size); if (ret < 0) die("failed to read page"); /* reset the file pointer back */ lseek(input_fd, save_seek, SEEK_SET); return; } 
munmap(cpu_data[cpu].page, page_size); cpu_data[cpu].page = NULL; if (cpu_data[cpu].size <= page_size) return; update_cpu_data_index(cpu); cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE, input_fd, cpu_data[cpu].offset); if (cpu_data[cpu].page == MAP_FAILED) die("failed to mmap cpu %d at offset 0x%llx", cpu, cpu_data[cpu].offset); } static unsigned int type_len4host(unsigned int type_len_ts) { if (file_bigendian) return (type_len_ts >> 27) & ((1 << 5) - 1); else return type_len_ts & ((1 << 5) - 1); } static unsigned int ts4host(unsigned int type_len_ts) { if (file_bigendian) return type_len_ts & ((1 << 27) - 1); else return type_len_ts >> 5; } static int calc_index(void *ptr, int cpu) { return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page; } struct record *trace_peek_data(int cpu) { struct record *data; void *page = cpu_data[cpu].page; int idx = cpu_data[cpu].index; void *ptr = page + idx; unsigned long long extend; unsigned int type_len_ts; unsigned int type_len; unsigned int delta; unsigned int length = 0; if (cpu_data[cpu].next) return cpu_data[cpu].next; if (!page) return NULL; if (!idx) { /* FIXME: handle header page */ if (header_page_ts_size != 8) die("expected a long long type for timestamp"); cpu_data[cpu].timestamp = data2host8(ptr); ptr += 8; switch (header_page_size_size) { case 4: cpu_data[cpu].page_size = data2host4(ptr); ptr += 4; break; case 8: cpu_data[cpu].page_size = data2host8(ptr); ptr += 8; break; default: die("bad long size"); } ptr = cpu_data[cpu].page + header_page_data_offset; } read_again: idx = calc_index(ptr, cpu); if (idx >= cpu_data[cpu].page_size) { get_next_page(cpu); return trace_peek_data(cpu); } type_len_ts = data2host4(ptr); ptr += 4; type_len = type_len4host(type_len_ts); delta = ts4host(type_len_ts); switch (type_len) { case RINGBUF_TYPE_PADDING: if (!delta) die("error, hit unexpected end of page"); length = data2host4(ptr); ptr += 4; length *= 4; ptr += length; goto read_again; case 
RINGBUF_TYPE_TIME_EXTEND: extend = data2host4(ptr); ptr += 4; extend <<= TS_SHIFT; extend += delta; cpu_data[cpu].timestamp += extend; goto read_again; case RINGBUF_TYPE_TIME_STAMP: ptr += 12; break; case 0: length = data2host4(ptr); ptr += 4; die("here! length=%d", length); break; default: length = type_len * 4; break; } cpu_data[cpu].timestamp += delta; data = malloc_or_die(sizeof(*data)); memset(data, 0, sizeof(*data)); data->ts = cpu_data[cpu].timestamp; data->size = length; data->data = ptr; ptr += length; cpu_data[cpu].index = calc_index(ptr, cpu); cpu_data[cpu].next = data; return data; } struct record *trace_read_data(int cpu) { struct record *data; data = trace_peek_data(cpu); cpu_data[cpu].next = NULL; return data; } ssize_t trace_report(int fd, bool __repipe) { char buf[BUFSIZ]; char test[] = { 23, 8, 68 }; char *version; int show_version = 0; int show_funcs = 0; int show_printk = 0; ssize_t size; calc_data_size = 1; repipe = __repipe; input_fd = fd; read_or_die(buf, 3); if (memcmp(buf, test, 3) != 0) die("no trace data in the file"); read_or_die(buf, 7); if (memcmp(buf, "tracing", 7) != 0) die("not a trace file (missing 'tracing' tag)"); version = read_string(); if (show_version) printf("version = %s\n", version); free(version); read_or_die(buf, 1); file_bigendian = buf[0]; host_bigendian = bigendian(); read_or_die(buf, 1); long_size = buf[0]; page_size = read4(); read_header_files(); read_ftrace_files(); read_event_files(); read_proc_kallsyms(); read_ftrace_printk(); size = calc_data_size - 1; calc_data_size = 0; repipe = false; if (show_funcs) { print_funcs(); return size; } if (show_printk) { print_printk(); return size; } return size; }
gpl-2.0
widz4rd/WIDzard-A850K
drivers/edac/cpc925_edac.c
5061
31950
/* * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller. * * Copyright (c) 2008 Wind River Systems, Inc. * * Authors: Cao Qingtao <qingtao.cao@windriver.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/edac.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/gfp.h> #include "edac_core.h" #include "edac_module.h" #define CPC925_EDAC_REVISION " Ver: 1.0.0" #define CPC925_EDAC_MOD_STR "cpc925_edac" #define cpc925_printk(level, fmt, arg...) \ edac_printk(level, "CPC925", fmt, ##arg) #define cpc925_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg) /* * CPC925 registers are of 32 bits with bit0 defined at the * most significant bit and bit31 at that of least significant. */ #define CPC925_BITS_PER_REG 32 #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr)) /* * EDAC device names for the error detections of * CPU Interface and Hypertransport Link. */ #define CPC925_CPU_ERR_DEV "cpu" #define CPC925_HT_LINK_DEV "htlink" /* Suppose DDR Refresh cycle is 15.6 microsecond */ #define CPC925_REF_FREQ 0xFA69 #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */ #define CPC925_NR_CSROWS 8 /* * All registers and bits definitions are taken from * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02". 
*/ /* * CPU and Memory Controller Registers */ /************************************************************ * Processor Interface Exception Mask Register (APIMASK) ************************************************************/ #define REG_APIMASK_OFFSET 0x30070 enum apimask_bits { APIMASK_DART = CPC925_BIT(0), /* DART Exception */ APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ APIMASK_STAT = CPC925_BIT(3), /* Status Exception */ APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */ APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ /* BIT(7) Reserved */ APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 | APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 | APIMASK_ADRS1), ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), }; #define APIMASK_ADI(n) CPC925_BIT(((n)+1)) /************************************************************ * Processor Interface Exception Register (APIEXCP) ************************************************************/ #define REG_APIEXCP_OFFSET 0x30060 enum apiexcp_bits { APIEXCP_DART = CPC925_BIT(0), /* DART Exception */ APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */ APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */ APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ /* BIT(7) Reserved */ APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ APIEXCP_ECC_UE_L = CPC925_BIT(10), 
/* UECC lower */ APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 | APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 | APIEXCP_ADRS1), UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L), CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L), ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED), }; /************************************************************ * Memory Bus Configuration Register (MBCR) ************************************************************/ #define REG_MBCR_OFFSET 0x2190 #define MBCR_64BITCFG_SHIFT 23 #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT) #define MBCR_64BITBUS_SHIFT 22 #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT) /************************************************************ * Memory Bank Mode Register (MBMR) ************************************************************/ #define REG_MBMR_OFFSET 0x21C0 #define MBMR_MODE_MAX_VALUE 0xF #define MBMR_MODE_SHIFT 25 #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT) #define MBMR_BBA_SHIFT 24 #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT) /************************************************************ * Memory Bank Boundary Address Register (MBBAR) ************************************************************/ #define REG_MBBAR_OFFSET 0x21D0 #define MBBAR_BBA_MAX_VALUE 0xFF #define MBBAR_BBA_SHIFT 24 #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT) /************************************************************ * Memory Scrub Control Register (MSCR) ************************************************************/ #define REG_MSCR_OFFSET 0x2400 #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/ #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */ #define MSCR_SI_SHIFT 16 /* si - bit8:15*/ #define MSCR_SI_MAX_VALUE 0xFF #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT) /************************************************************ * Memory Scrub Range Start 
Register (MSRSR) ************************************************************/ #define REG_MSRSR_OFFSET 0x2410 /************************************************************ * Memory Scrub Range End Register (MSRER) ************************************************************/ #define REG_MSRER_OFFSET 0x2420 /************************************************************ * Memory Scrub Pattern Register (MSPR) ************************************************************/ #define REG_MSPR_OFFSET 0x2430 /************************************************************ * Memory Check Control Register (MCCR) ************************************************************/ #define REG_MCCR_OFFSET 0x2440 enum mccr_bits { MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */ }; /************************************************************ * Memory Check Range End Register (MCRER) ************************************************************/ #define REG_MCRER_OFFSET 0x2450 /************************************************************ * Memory Error Address Register (MEAR) ************************************************************/ #define REG_MEAR_OFFSET 0x2460 #define MEAR_BCNT_MAX_VALUE 0x3 #define MEAR_BCNT_SHIFT 30 #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT) #define MEAR_RANK_MAX_VALUE 0x7 #define MEAR_RANK_SHIFT 27 #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT) #define MEAR_COL_MAX_VALUE 0x7FF #define MEAR_COL_SHIFT 16 #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT) #define MEAR_BANK_MAX_VALUE 0x3 #define MEAR_BANK_SHIFT 14 #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT) #define MEAR_ROW_MASK 0x00003FFF /************************************************************ * Memory Error Syndrome Register (MESR) ************************************************************/ #define REG_MESR_OFFSET 0x2470 #define MESR_ECC_SYN_H_MASK 0xFF00 #define MESR_ECC_SYN_L_MASK 0x00FF 
/************************************************************ * Memory Mode Control Register (MMCR) ************************************************************/ #define REG_MMCR_OFFSET 0x2500 enum mmcr_bits { MMCR_REG_DIMM_MODE = CPC925_BIT(3), }; /* * HyperTransport Link Registers */ /************************************************************ * Error Handling/Enumeration Scratch Pad Register (ERRCTRL) ************************************************************/ #define REG_ERRCTRL_OFFSET 0x70140 enum errctrl_bits { /* nonfatal interrupts for */ ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */ ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */ ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */ ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */ ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */ ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */ ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */ ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */ HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF | ERRCTRL_RSP_NF | ERRCTRL_EOC_NF | ERRCTRL_OVF_NF | ERRCTRL_PROT_NF), HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL), }; /************************************************************ * Link Configuration and Link Control Register (LINKCTRL) ************************************************************/ #define REG_LINKCTRL_OFFSET 0x70110 enum linkctrl_bits { LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)), LINKCTRL_LINK_FAIL = CPC925_BIT(27), HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL), }; /************************************************************ * Link FreqCap/Error/Freq/Revision ID Register (LINKERR) ************************************************************/ #define REG_LINKERR_OFFSET 0x70120 enum linkerr_bits { LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */ LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */ LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */ 
HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR | LINKERR_PROT_ERR), }; /************************************************************ * Bridge Control Register (BRGCTRL) ************************************************************/ #define REG_BRGCTRL_OFFSET 0x70300 enum brgctrl_bits { BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */ BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */ }; /* Private structure for edac memory controller */ struct cpc925_mc_pdata { void __iomem *vbase; unsigned long total_mem; const char *name; int edac_idx; }; /* Private structure for common edac device */ struct cpc925_dev_info { void __iomem *vbase; struct platform_device *pdev; char *ctl_name; int edac_idx; struct edac_device_ctl_info *edac_dev; void (*init)(struct cpc925_dev_info *dev_info); void (*exit)(struct cpc925_dev_info *dev_info); void (*check)(struct edac_device_ctl_info *edac_dev); }; /* Get total memory size from Open Firmware DTB */ static void get_total_mem(struct cpc925_mc_pdata *pdata) { struct device_node *np = NULL; const unsigned int *reg, *reg_end; int len, sw, aw; unsigned long start, size; np = of_find_node_by_type(NULL, "memory"); if (!np) return; aw = of_n_addr_cells(np); sw = of_n_size_cells(np); reg = (const unsigned int *)of_get_property(np, "reg", &len); reg_end = reg + len/4; pdata->total_mem = 0; do { start = of_read_number(reg, aw); reg += aw; size = of_read_number(reg, sw); reg += sw; debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, start, size); pdata->total_mem += size; } while (reg < reg_end); of_node_put(np); debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); } static void cpc925_init_csrows(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; struct csrow_info *csrow; int index; u32 mbmr, mbbar, bba; unsigned long row_size, last_nr_pages = 0; get_total_mem(pdata); for (index = 0; index < mci->nr_csrows; index++) { mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET + 0x20 * 
index); mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET + 0x20 + index); bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) | ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT); if (bba == 0) continue; /* not populated */ csrow = &mci->csrows[index]; row_size = bba * (1UL << 28); /* 256M */ csrow->first_page = last_nr_pages; csrow->nr_pages = row_size >> PAGE_SHIFT; csrow->last_page = csrow->first_page + csrow->nr_pages - 1; last_nr_pages = csrow->last_page + 1; csrow->mtype = MEM_RDDR; csrow->edac_mode = EDAC_SECDED; switch (csrow->nr_channels) { case 1: /* Single channel */ csrow->grain = 32; /* four-beat burst of 32 bytes */ break; case 2: /* Dual channel */ default: csrow->grain = 64; /* four-beat burst of 64 bytes */ break; } switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { case 6: /* 0110, no way to differentiate X8 VS X16 */ case 5: /* 0101 */ case 8: /* 1000 */ csrow->dtype = DEV_X16; break; case 7: /* 0111 */ case 9: /* 1001 */ csrow->dtype = DEV_X8; break; default: csrow->dtype = DEV_UNKNOWN; break; } } } /* Enable memory controller ECC detection */ static void cpc925_mc_init(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; u32 apimask; u32 mccr; /* Enable various ECC error exceptions */ apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET); if ((apimask & ECC_MASK_ENABLE) == 0) { apimask |= ECC_MASK_ENABLE; __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET); } /* Enable ECC detection */ mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET); if ((mccr & MCCR_ECC_EN) == 0) { mccr |= MCCR_ECC_EN; __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET); } } /* Disable memory controller ECC detection */ static void cpc925_mc_exit(struct mem_ctl_info *mci) { /* * WARNING: * We are supposed to clear the ECC error detection bits, * and it will be no problem to do so. 
However, once they * are cleared here if we want to re-install CPC925 EDAC * module later, setting them up in cpc925_mc_init() will * trigger machine check exception. * Also, it's ok to leave ECC error detection bits enabled, * since they are reset to 1 by default or by boot loader. */ return; } /* * Revert DDR column/row/bank addresses into page frame number and * offset in page. * * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs), * physical address(PA) bits to column address(CA) bits mappings are: * CA 0 1 2 3 4 5 6 7 8 9 10 * PA 59 58 57 56 55 54 53 52 51 50 49 * * physical address(PA) bits to bank address(BA) bits mappings are: * BA 0 1 * PA 43 44 * * physical address(PA) bits to row address(RA) bits mappings are: * RA 0 1 2 3 4 5 6 7 8 9 10 11 12 * PA 36 35 34 48 47 46 45 40 41 42 39 38 37 */ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, unsigned long *pfn, unsigned long *offset, int *csrow) { u32 bcnt, rank, col, bank, row; u32 c; unsigned long pa; int i; bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT; rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT; col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT; bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT; row = mear & MEAR_ROW_MASK; *csrow = rank; #ifdef CONFIG_EDAC_DEBUG if (mci->csrows[rank].first_page == 0) { cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " "non-populated csrow, broken hardware?\n"); return; } #endif /* Revert csrow number */ pa = mci->csrows[rank].first_page << PAGE_SHIFT; /* Revert column address */ col += bcnt; for (i = 0; i < 11; i++) { c = col & 0x1; col >>= 1; pa |= c << (14 - i); } /* Revert bank address */ pa |= bank << 19; /* Revert row address, in 4 steps */ for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << (26 - i); } for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << (21 + i); } for (i = 0; i < 4; i++) { c = row & 0x1; row >>= 1; pa |= c << (18 - i); } for (i = 0; i < 3; i++) { c = row & 0x1; row >>= 1; pa |= c << 
(29 - i); } *offset = pa & (PAGE_SIZE - 1); *pfn = pa >> PAGE_SHIFT; debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); } static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) { if ((syndrome & MESR_ECC_SYN_H_MASK) == 0) return 0; if ((syndrome & MESR_ECC_SYN_L_MASK) == 0) return 1; cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n", syndrome); return 1; } /* Check memory controller registers for ECC errors */ static void cpc925_mc_check(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; u32 apiexcp; u32 mear; u32 mesr; u16 syndrome; unsigned long pfn = 0, offset = 0; int csrow = 0, channel = 0; /* APIEXCP is cleared when read */ apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET); if ((apiexcp & ECC_EXCP_DETECTED) == 0) return; mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET); syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK); mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET); /* Revert column/row addresses into page frame number, etc */ cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow); if (apiexcp & CECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); channel = cpc925_mc_find_channel(mci, syndrome); edac_mc_handle_ce(mci, pfn, offset, syndrome, csrow, channel, mci->ctl_name); } if (apiexcp & UECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name); } cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n", __raw_readl(pdata->vbase + REG_APIMASK_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n", __raw_readl(pdata->vbase + REG_MSCR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n", __raw_readl(pdata->vbase + REG_MSRSR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n", __raw_readl(pdata->vbase + 
REG_MSRER_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n", __raw_readl(pdata->vbase + REG_MSPR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n", __raw_readl(pdata->vbase + REG_MCCR_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n", __raw_readl(pdata->vbase + REG_MCRER_OFFSET)); cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n", mesr); cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n", syndrome); } /******************** CPU err device********************************/ static u32 cpc925_cpu_mask_disabled(void) { struct device_node *cpus; struct device_node *cpunode = NULL; static u32 mask = 0; /* use cached value if available */ if (mask != 0) return mask; mask = APIMASK_ADI0 | APIMASK_ADI1; cpus = of_find_node_by_path("/cpus"); if (cpus == NULL) { cpc925_printk(KERN_DEBUG, "No /cpus node !\n"); return 0; } while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) { const u32 *reg = of_get_property(cpunode, "reg", NULL); if (strcmp(cpunode->type, "cpu")) { cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name); continue; } if (reg == NULL || *reg > 2) { cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name); continue; } mask &= ~APIMASK_ADI(*reg); } if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) { /* We assume that each CPU sits on it's own PI and that * for present CPUs the reg property equals to the PI * interface id */ cpc925_printk(KERN_WARNING, "Assuming PI id is equal to CPU MPIC id!\n"); } of_node_put(cpunode); of_node_put(cpus); return mask; } /* Enable CPU Errors detection */ static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) { u32 apimask; u32 cpumask; apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); cpumask = cpc925_cpu_mask_disabled(); if (apimask & cpumask) { cpc925_printk(KERN_WARNING, "CPU(s) not present, " "but enabled in APIMASK, disabling\n"); apimask &= ~cpumask; } if ((apimask & CPU_MASK_ENABLE) == 0) apimask |= 
CPU_MASK_ENABLE; __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); } /* Disable CPU Errors detection */ static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info) { /* * WARNING: * We are supposed to clear the CPU error detection bits, * and it will be no problem to do so. However, once they * are cleared here if we want to re-install CPC925 EDAC * module later, setting them up in cpc925_cpu_init() will * trigger machine check exception. * Also, it's ok to leave CPU error detection bits enabled, * since they are reset to 1 by default. */ return; } /* Check for CPU Errors */ static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) { struct cpc925_dev_info *dev_info = edac_dev->pvt_info; u32 apiexcp; u32 apimask; /* APIEXCP is cleared when read */ apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET); if ((apiexcp & CPU_EXCP_DETECTED) == 0) return; if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0) return; apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); cpc925_printk(KERN_INFO, "Processor Interface Fault\n" "Processor Interface register dump:\n"); cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask); cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); } /******************** HT Link err device****************************/ /* Enable HyperTransport Link Error detection */ static void cpc925_htlink_init(struct cpc925_dev_info *dev_info) { u32 ht_errctrl; ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) { ht_errctrl |= HT_ERRCTRL_ENABLE; __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); } } /* Disable HyperTransport Link Error detection */ static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info) { u32 ht_errctrl; ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); ht_errctrl &= ~HT_ERRCTRL_ENABLE; __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); } /* Check for 
HyperTransport Link errors */ static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev) { struct cpc925_dev_info *dev_info = edac_dev->pvt_info; u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET); u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET); u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET); if (!((brgctrl & BRGCTRL_DETSERR) || (linkctrl & HT_LINKCTRL_DETECTED) || (errctrl & HT_ERRCTRL_DETECTED) || (linkerr & HT_LINKERR_DETECTED))) return; cpc925_printk(KERN_INFO, "HT Link Fault\n" "HT register dump:\n"); cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n", brgctrl); cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n", linkctrl); cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n", errctrl); cpc925_printk(KERN_INFO, "Link Error 0x%08x\n", linkerr); /* Clear by write 1 */ if (brgctrl & BRGCTRL_DETSERR) __raw_writel(BRGCTRL_DETSERR, dev_info->vbase + REG_BRGCTRL_OFFSET); if (linkctrl & HT_LINKCTRL_DETECTED) __raw_writel(HT_LINKCTRL_DETECTED, dev_info->vbase + REG_LINKCTRL_OFFSET); /* Initiate Secondary Bus Reset to clear the chain failure */ if (errctrl & ERRCTRL_CHN_FAL) __raw_writel(BRGCTRL_SECBUSRESET, dev_info->vbase + REG_BRGCTRL_OFFSET); if (errctrl & ERRCTRL_RSP_ERR) __raw_writel(ERRCTRL_RSP_ERR, dev_info->vbase + REG_ERRCTRL_OFFSET); if (linkerr & HT_LINKERR_DETECTED) __raw_writel(HT_LINKERR_DETECTED, dev_info->vbase + REG_LINKERR_OFFSET); edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name); } static struct cpc925_dev_info cpc925_devs[] = { { .ctl_name = CPC925_CPU_ERR_DEV, .init = cpc925_cpu_init, .exit = cpc925_cpu_exit, .check = cpc925_cpu_check, }, { .ctl_name = CPC925_HT_LINK_DEV, .init = cpc925_htlink_init, .exit = cpc925_htlink_exit, .check = cpc925_htlink_check, }, {0}, /* Terminated by NULL */ }; /* * Add CPU Err detection and HyperTransport Link Err detection * as common "edac_device", they have no corresponding 
device * nodes in the Open Firmware DTB and we have to add platform * devices for them. Also, they will share the MMIO with that * of memory controller. */ static void cpc925_add_edac_devices(void __iomem *vbase) { struct cpc925_dev_info *dev_info; if (!vbase) { cpc925_printk(KERN_ERR, "MMIO not established yet\n"); return; } for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) { dev_info->vbase = vbase; dev_info->pdev = platform_device_register_simple( dev_info->ctl_name, 0, NULL, 0); if (IS_ERR(dev_info->pdev)) { cpc925_printk(KERN_ERR, "Can't register platform device for %s\n", dev_info->ctl_name); continue; } /* * Don't have to allocate private structure but * make use of cpc925_devs[] instead. */ dev_info->edac_idx = edac_device_alloc_index(); dev_info->edac_dev = edac_device_alloc_ctl_info(0, dev_info->ctl_name, 1, NULL, 0, 0, NULL, 0, dev_info->edac_idx); if (!dev_info->edac_dev) { cpc925_printk(KERN_ERR, "No memory for edac device\n"); goto err1; } dev_info->edac_dev->pvt_info = dev_info; dev_info->edac_dev->dev = &dev_info->pdev->dev; dev_info->edac_dev->ctl_name = dev_info->ctl_name; dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR; dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev); if (edac_op_state == EDAC_OPSTATE_POLL) dev_info->edac_dev->edac_check = dev_info->check; if (dev_info->init) dev_info->init(dev_info); if (edac_device_add_device(dev_info->edac_dev) > 0) { cpc925_printk(KERN_ERR, "Unable to add edac device for %s\n", dev_info->ctl_name); goto err2; } debugf0("%s: Successfully added edac device for %s\n", __func__, dev_info->ctl_name); continue; err2: if (dev_info->exit) dev_info->exit(dev_info); edac_device_free_ctl_info(dev_info->edac_dev); err1: platform_device_unregister(dev_info->pdev); } } /* * Delete the common "edac_device" for CPU Err Detection * and HyperTransport Link Err Detection */ static void cpc925_del_edac_devices(void) { struct cpc925_dev_info *dev_info; for (dev_info = &cpc925_devs[0]; dev_info->init; 
dev_info++) { if (dev_info->edac_dev) { edac_device_del_device(dev_info->edac_dev->dev); edac_device_free_ctl_info(dev_info->edac_dev); platform_device_unregister(dev_info->pdev); } if (dev_info->exit) dev_info->exit(dev_info); debugf0("%s: Successfully deleted edac device for %s\n", __func__, dev_info->ctl_name); } } /* Convert current back-ground scrub rate into byte/sec bandwidth */ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) { struct cpc925_mc_pdata *pdata = mci->pvt_info; int bw; u32 mscr; u8 si; mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || (si == 0)) { cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); bw = 0; } else bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; return bw; } /* Return 0 for single channel; 1 for dual channel */ static int cpc925_mc_get_channels(void __iomem *vbase) { int dual = 0; u32 mbcr; mbcr = __raw_readl(vbase + REG_MBCR_OFFSET); /* * Dual channel only when 128-bit wide physical bus * and 128-bit configuration. */ if (((mbcr & MBCR_64BITCFG_MASK) == 0) && ((mbcr & MBCR_64BITBUS_MASK) == 0)) dual = 1; debugf0("%s: %s channel\n", __func__, (dual > 0) ? 
"Dual" : "Single"); return dual; } static int __devinit cpc925_probe(struct platform_device *pdev) { static int edac_mc_idx; struct mem_ctl_info *mci; void __iomem *vbase; struct cpc925_mc_pdata *pdata; struct resource *r; int res = 0, nr_channels; debugf0("%s: %s platform device found!\n", __func__, pdev->name); if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { res = -ENOMEM; goto out; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { cpc925_printk(KERN_ERR, "Unable to get resource\n"); res = -ENOENT; goto err1; } if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r), pdev->name)) { cpc925_printk(KERN_ERR, "Unable to request mem region\n"); res = -EBUSY; goto err1; } vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!vbase) { cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); res = -ENOMEM; goto err2; } nr_channels = cpc925_mc_get_channels(vbase); mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata), CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx); if (!mci) { cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); res = -ENOMEM; goto err2; } pdata = mci->pvt_info; pdata->vbase = vbase; pdata->edac_idx = edac_mc_idx++; pdata->name = pdev->name; mci->dev = &pdev->dev; platform_set_drvdata(pdev, mci); mci->dev_name = dev_name(&pdev->dev); mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = CPC925_EDAC_MOD_STR; mci->mod_ver = CPC925_EDAC_REVISION; mci->ctl_name = pdev->name; if (edac_op_state == EDAC_OPSTATE_POLL) mci->edac_check = cpc925_mc_check; mci->ctl_page_to_phys = NULL; mci->scrub_mode = SCRUB_SW_SRC; mci->set_sdram_scrub_rate = NULL; mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate; cpc925_init_csrows(mci); /* Setup memory controller registers */ cpc925_mc_init(mci); if (edac_mc_add_mc(mci) > 0) { cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n"); goto err3; } 
cpc925_add_edac_devices(vbase); /* get this far and it's successful */ debugf0("%s: success\n", __func__); res = 0; goto out; err3: cpc925_mc_exit(mci); edac_mc_free(mci); err2: devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); err1: devres_release_group(&pdev->dev, cpc925_probe); out: return res; } static int cpc925_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); /* * Delete common edac devices before edac mc, because * the former share the MMIO of the latter. */ cpc925_del_edac_devices(); cpc925_mc_exit(mci); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); return 0; } static struct platform_driver cpc925_edac_driver = { .probe = cpc925_probe, .remove = cpc925_remove, .driver = { .name = "cpc925_edac", } }; static int __init cpc925_edac_init(void) { int ret = 0; printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n"); printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n"); /* Only support POLL mode so far */ edac_op_state = EDAC_OPSTATE_POLL; ret = platform_driver_register(&cpc925_edac_driver); if (ret) { printk(KERN_WARNING "Failed to register %s\n", CPC925_EDAC_MOD_STR); } return ret; } static void __exit cpc925_edac_exit(void) { platform_driver_unregister(&cpc925_edac_driver); } module_init(cpc925_edac_init); module_exit(cpc925_edac_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>"); MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
gpl-2.0
MinoochX/i9500-mxk
drivers/video/omap2/displays/panel-dvi.c
5061
7414
/* * DVI output support * * Copyright (C) 2011 Texas Instruments Inc * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/slab.h> #include <video/omapdss.h> #include <linux/i2c.h> #include <drm/drm_edid.h> #include <video/omap-panel-dvi.h> static const struct omap_video_timings panel_dvi_default_timings = { .x_res = 640, .y_res = 480, .pixel_clock = 23500, .hfp = 48, .hsw = 32, .hbp = 80, .vfp = 3, .vsw = 4, .vbp = 7, }; struct panel_drv_data { struct omap_dss_device *dssdev; struct mutex lock; }; static inline struct panel_dvi_platform_data *get_pdata(const struct omap_dss_device *dssdev) { return dssdev->data; } static int panel_dvi_power_on(struct omap_dss_device *dssdev) { struct panel_dvi_platform_data *pdata = get_pdata(dssdev); int r; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) return 0; r = omapdss_dpi_display_enable(dssdev); if (r) goto err0; if (pdata->platform_enable) { r = pdata->platform_enable(dssdev); if (r) goto err1; } return 0; err1: omapdss_dpi_display_disable(dssdev); err0: return r; } static void panel_dvi_power_off(struct omap_dss_device *dssdev) { struct panel_dvi_platform_data *pdata = get_pdata(dssdev); if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return; if (pdata->platform_disable) pdata->platform_disable(dssdev); omapdss_dpi_display_disable(dssdev); } static int panel_dvi_probe(struct omap_dss_device *dssdev) { struct panel_drv_data 
*ddata; ddata = kzalloc(sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; dssdev->panel.timings = panel_dvi_default_timings; dssdev->panel.config = OMAP_DSS_LCD_TFT; ddata->dssdev = dssdev; mutex_init(&ddata->lock); dev_set_drvdata(&dssdev->dev, ddata); return 0; } static void __exit panel_dvi_remove(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); mutex_lock(&ddata->lock); dev_set_drvdata(&dssdev->dev, NULL); mutex_unlock(&ddata->lock); kfree(ddata); } static int panel_dvi_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&ddata->lock); r = panel_dvi_power_on(dssdev); if (r == 0) dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; mutex_unlock(&ddata->lock); return r; } static void panel_dvi_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); mutex_lock(&ddata->lock); panel_dvi_power_off(dssdev); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; mutex_unlock(&ddata->lock); } static int panel_dvi_suspend(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); mutex_lock(&ddata->lock); panel_dvi_power_off(dssdev); dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; mutex_unlock(&ddata->lock); return 0; } static int panel_dvi_resume(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&ddata->lock); r = panel_dvi_power_on(dssdev); if (r == 0) dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; mutex_unlock(&ddata->lock); return r; } static void panel_dvi_set_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); mutex_lock(&ddata->lock); dpi_set_timings(dssdev, timings); mutex_unlock(&ddata->lock); } static void panel_dvi_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { struct panel_drv_data *ddata = 
dev_get_drvdata(&dssdev->dev); mutex_lock(&ddata->lock); *timings = dssdev->panel.timings; mutex_unlock(&ddata->lock); } static int panel_dvi_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); int r; mutex_lock(&ddata->lock); r = dpi_check_timings(dssdev, timings); mutex_unlock(&ddata->lock); return r; } static int panel_dvi_ddc_read(struct i2c_adapter *adapter, unsigned char *buf, u16 count, u8 offset) { int r, retries; for (retries = 3; retries > 0; retries--) { struct i2c_msg msgs[] = { { .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &offset, }, { .addr = DDC_ADDR, .flags = I2C_M_RD, .len = count, .buf = buf, } }; r = i2c_transfer(adapter, msgs, 2); if (r == 2) return 0; if (r != -EAGAIN) break; } return r < 0 ? r : -EIO; } static int panel_dvi_read_edid(struct omap_dss_device *dssdev, u8 *edid, int len) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); struct panel_dvi_platform_data *pdata = get_pdata(dssdev); struct i2c_adapter *adapter; int r, l, bytes_read; mutex_lock(&ddata->lock); if (pdata->i2c_bus_num == 0) { r = -ENODEV; goto err; } adapter = i2c_get_adapter(pdata->i2c_bus_num); if (!adapter) { dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n", pdata->i2c_bus_num); r = -EINVAL; goto err; } l = min(EDID_LENGTH, len); r = panel_dvi_ddc_read(adapter, edid, l, 0); if (r) goto err; bytes_read = l; /* if there are extensions, read second block */ if (len > EDID_LENGTH && edid[0x7e] > 0) { l = min(EDID_LENGTH, len - EDID_LENGTH); r = panel_dvi_ddc_read(adapter, edid + EDID_LENGTH, l, EDID_LENGTH); if (r) goto err; bytes_read += l; } mutex_unlock(&ddata->lock); return bytes_read; err: mutex_unlock(&ddata->lock); return r; } static bool panel_dvi_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); struct panel_dvi_platform_data *pdata = get_pdata(dssdev); struct i2c_adapter *adapter; unsigned 
char out; int r; mutex_lock(&ddata->lock); if (pdata->i2c_bus_num == 0) goto out; adapter = i2c_get_adapter(pdata->i2c_bus_num); if (!adapter) goto out; r = panel_dvi_ddc_read(adapter, &out, 1, 0); mutex_unlock(&ddata->lock); return r == 0; out: mutex_unlock(&ddata->lock); return true; } static struct omap_dss_driver panel_dvi_driver = { .probe = panel_dvi_probe, .remove = __exit_p(panel_dvi_remove), .enable = panel_dvi_enable, .disable = panel_dvi_disable, .suspend = panel_dvi_suspend, .resume = panel_dvi_resume, .set_timings = panel_dvi_set_timings, .get_timings = panel_dvi_get_timings, .check_timings = panel_dvi_check_timings, .read_edid = panel_dvi_read_edid, .detect = panel_dvi_detect, .driver = { .name = "dvi", .owner = THIS_MODULE, }, }; static int __init panel_dvi_init(void) { return omap_dss_register_driver(&panel_dvi_driver); } static void __exit panel_dvi_exit(void) { omap_dss_unregister_driver(&panel_dvi_driver); } module_init(panel_dvi_init); module_exit(panel_dvi_exit); MODULE_LICENSE("GPL");
gpl-2.0
steven676/ti-omap-encore-kernel3
arch/frv/kernel/pm-mb93093.c
13765
1429
/* * FR-V MB93093 Power Management Routines * * Copyright (c) 2004 Red Hat, Inc. * * Written by: msalter@redhat.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License. * */ #include <linux/init.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/sysctl.h> #include <linux/errno.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <asm/mb86943a.h> #include "local.h" static unsigned long imask; /* * Setup interrupt masks, etc to enable wakeup by power switch */ static void mb93093_power_switch_setup(void) { /* mask all but FPGA interrupt sources. */ imask = *(volatile unsigned long *)0xfeff9820; *(volatile unsigned long *)0xfeff9820 = ~(1 << (IRQ_XIRQ2_LEVEL + 16)) & 0xfffe0000; } /* * Cleanup interrupt masks, etc after wakeup by power switch */ static void mb93093_power_switch_cleanup(void) { *(volatile unsigned long *)0xfeff9820 = imask; } /* * Return non-zero if wakeup irq was caused by power switch */ static int mb93093_power_switch_check(void) { return 1; } /* * Initialize power interface */ static int __init mb93093_pm_init(void) { __power_switch_wake_setup = mb93093_power_switch_setup; __power_switch_wake_check = mb93093_power_switch_check; __power_switch_wake_cleanup = mb93093_power_switch_cleanup; return 0; } __initcall(mb93093_pm_init);
gpl-2.0
changbindu/linux-ok6410
arch/frv/kernel/pm-mb93093.c
13765
1429
/* * FR-V MB93093 Power Management Routines * * Copyright (c) 2004 Red Hat, Inc. * * Written by: msalter@redhat.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License. * */ #include <linux/init.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/sysctl.h> #include <linux/errno.h> #include <linux/delay.h> #include <asm/uaccess.h> #include <asm/mb86943a.h> #include "local.h" static unsigned long imask; /* * Setup interrupt masks, etc to enable wakeup by power switch */ static void mb93093_power_switch_setup(void) { /* mask all but FPGA interrupt sources. */ imask = *(volatile unsigned long *)0xfeff9820; *(volatile unsigned long *)0xfeff9820 = ~(1 << (IRQ_XIRQ2_LEVEL + 16)) & 0xfffe0000; } /* * Cleanup interrupt masks, etc after wakeup by power switch */ static void mb93093_power_switch_cleanup(void) { *(volatile unsigned long *)0xfeff9820 = imask; } /* * Return non-zero if wakeup irq was caused by power switch */ static int mb93093_power_switch_check(void) { return 1; } /* * Initialize power interface */ static int __init mb93093_pm_init(void) { __power_switch_wake_setup = mb93093_power_switch_setup; __power_switch_wake_check = mb93093_power_switch_check; __power_switch_wake_cleanup = mb93093_power_switch_cleanup; return 0; } __initcall(mb93093_pm_init);
gpl-2.0
sminki/android_kernel_sony_u8500
net/ipv4/xfrm4_mode_transport.c
14277
2135
/* * xfrm4_mode_transport.c - Transport mode encapsulation for IPv4. * * Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/stringify.h> #include <net/dst.h> #include <net/ip.h> #include <net/xfrm.h> /* Add encapsulation header. * * The IP header will be moved forward to make space for the encapsulation * header. */ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; skb_set_network_header(skb, -x->props.header_len); skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol); skb->transport_header = skb->network_header + ihl; __skb_pull(skb, ihl); memmove(skb_network_header(skb), iph, ihl); return 0; } /* Remove encapsulation header. * * The IP header will be moved over the top of the encapsulation header. * * On entry, skb->h shall point to where the IP header should be and skb->nh * shall be set to where the IP header currently is. skb->data shall point * to the start of the payload. 
*/ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { int ihl = skb->data - skb_transport_header(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl); skb->network_header = skb->transport_header; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); skb_reset_transport_header(skb); return 0; } static struct xfrm_mode xfrm4_transport_mode = { .input = xfrm4_transport_input, .output = xfrm4_transport_output, .owner = THIS_MODULE, .encap = XFRM_MODE_TRANSPORT, }; static int __init xfrm4_transport_init(void) { return xfrm_register_mode(&xfrm4_transport_mode, AF_INET); } static void __exit xfrm4_transport_exit(void) { int err; err = xfrm_unregister_mode(&xfrm4_transport_mode, AF_INET); BUG_ON(err); } module_init(xfrm4_transport_init); module_exit(xfrm4_transport_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_TRANSPORT);
gpl-2.0
dchadic/linux-cmps107
net/netfilter/xt_TPROXY.c
198
17149
/* * Transparent proxy support for Linux/iptables * * Copyright (c) 2006-2010 BalaBit IT Ltd. * Author: Balazs Scheidler, Krisztian Kovacs * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <net/checksum.h> #include <net/udp.h> #include <net/tcp.h> #include <net/inet_sock.h> #include <net/inet_hashtables.h> #include <linux/inetdevice.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <net/netfilter/ipv4/nf_defrag_ipv4.h> #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #define XT_TPROXY_HAVE_IPV6 1 #include <net/if_inet6.h> #include <net/addrconf.h> #include <net/inet6_hashtables.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #endif #include <linux/netfilter/xt_TPROXY.h> enum nf_tproxy_lookup_t { NFT_LOOKUP_LISTENER, NFT_LOOKUP_ESTABLISHED, }; static bool tproxy_sk_is_transparent(struct sock *sk) { switch (sk->sk_state) { case TCP_TIME_WAIT: if (inet_twsk(sk)->tw_transparent) return true; break; case TCP_NEW_SYN_RECV: if (inet_rsk(inet_reqsk(sk))->no_srccheck) return true; break; default: if (inet_sk(sk)->transparent) return true; } sock_gen_put(sk); return false; } static inline __be32 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) { struct in_device *indev; __be32 laddr; if (user_laddr) return user_laddr; laddr = 0; rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); for_primary_ifa(indev) { laddr = ifa->ifa_local; break; } endfor_ifa(indev); rcu_read_unlock(); return laddr ? laddr : daddr; } /* * This is used when the user wants to intercept a connection matching * an explicit iptables rule. 
In this case the sockets are assumed * matching in preference order: * * - match: if there's a fully established connection matching the * _packet_ tuple, it is returned, assuming the redirection * already took place and we process a packet belonging to an * established connection * * - match: if there's a listening socket matching the redirection * (e.g. on-port & on-ip of the connection), it is returned, * regardless if it was bound to 0.0.0.0 or an explicit * address. The reasoning is that if there's an explicit rule, it * does not really matter if the listener is bound to an interface * or to 0. The user already stated that he wants redirection * (since he added the rule). * * Please note that there's an overlap between what a TPROXY target * and a socket match will match. Normally if you have both rules the * "socket" match will be the first one, effectively all packets * belonging to established connections going through that one. */ static inline struct sock * nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, const __be32 saddr, const __be32 daddr, const __be16 sport, const __be16 dport, const struct net_device *in, const enum nf_tproxy_lookup_t lookup_type) { struct sock *sk; switch (protocol) { case IPPROTO_TCP: switch (lookup_type) { case NFT_LOOKUP_LISTENER: sk = inet_lookup_listener(net, &tcp_hashinfo, saddr, sport, daddr, dport, in->ifindex); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY needs 0 bound * listeners too */ break; case NFT_LOOKUP_ESTABLISHED: sk = inet_lookup_established(net, &tcp_hashinfo, saddr, sport, daddr, dport, in->ifindex); break; default: BUG(); } break; case IPPROTO_UDP: sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, in->ifindex); if (sk) { int connected = (sk->sk_state == TCP_ESTABLISHED); int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY 
needs 0 bound * listeners too */ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || (lookup_type == NFT_LOOKUP_LISTENER && connected)) { sock_put(sk); sk = NULL; } } break; default: WARN_ON(1); sk = NULL; } pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n", protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk); return sk; } #ifdef XT_TPROXY_HAVE_IPV6 static inline struct sock * nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, const struct in6_addr *saddr, const struct in6_addr *daddr, const __be16 sport, const __be16 dport, const struct net_device *in, const enum nf_tproxy_lookup_t lookup_type) { struct sock *sk; switch (protocol) { case IPPROTO_TCP: switch (lookup_type) { case NFT_LOOKUP_LISTENER: sk = inet6_lookup_listener(net, &tcp_hashinfo, saddr, sport, daddr, ntohs(dport), in->ifindex); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY needs 0 bound * listeners too */ break; case NFT_LOOKUP_ESTABLISHED: sk = __inet6_lookup_established(net, &tcp_hashinfo, saddr, sport, daddr, ntohs(dport), in->ifindex); break; default: BUG(); } break; case IPPROTO_UDP: sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, in->ifindex); if (sk) { int connected = (sk->sk_state == TCP_ESTABLISHED); int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr); /* NOTE: we return listeners even if bound to * 0.0.0.0, those are filtered out in * xt_socket, since xt_TPROXY needs 0 bound * listeners too */ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || (lookup_type == NFT_LOOKUP_LISTENER && connected)) { sock_put(sk); sk = NULL; } } break; default: WARN_ON(1); sk = NULL; } pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n", protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk); return sk; } #endif /** * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT 
reopen redirections * @skb: The skb being processed. * @laddr: IPv4 address to redirect to or zero. * @lport: TCP port to redirect to or zero. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. * * tproxy_handle_time_wait4() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. */ static struct sock * tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, struct sock *sk) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr _hdr, *hp; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v4(net, iph->protocol, iph->saddr, laddr ? laddr : iph->daddr, hp->source, lport ? 
lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; } } return sk; } /* assign a socket to the skb -- consumes sk */ static void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) { skb_orphan(skb); skb->sk = sk; skb->destructor = sock_edemux; } static unsigned int tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, u_int32_t mark_mask, u_int32_t mark_value) { const struct iphdr *iph = ip_hdr(skb); struct udphdr _hdr, *hp; struct sock *sk; hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); if (hp == NULL) return NF_DROP; /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v4(net, iph->protocol, iph->saddr, iph->daddr, hp->source, hp->dest, skb->dev, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr4(skb, laddr, iph->daddr); if (!lport) lport = hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait4(net, skb, laddr, lport, sk); else if (!sk) /* no, there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v4(net, iph->protocol, iph->saddr, laddr, hp->source, lport, skb->dev, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~mark_mask) ^ mark_value; pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", iph->protocol, &iph->daddr, ntohs(hp->dest), &laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto %hhu %pI4:%hu -> %pI4:%hu, mark: 
%x\n", iph->protocol, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static unsigned int tproxy_tg4_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info *tgi = par->targinfo; return tproxy_tg4(par->net, skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value); } static unsigned int tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; return tproxy_tg4(par->net, skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); } #ifdef XT_TPROXY_HAVE_IPV6 static inline const struct in6_addr * tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, const struct in6_addr *daddr) { struct inet6_dev *indev; struct inet6_ifaddr *ifa; struct in6_addr *laddr; if (!ipv6_addr_any(user_laddr)) return user_laddr; laddr = NULL; rcu_read_lock(); indev = __in6_dev_get(skb->dev); if (indev) list_for_each_entry(ifa, &indev->addr_list, if_list) { if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) continue; laddr = &ifa->addr; break; } rcu_read_unlock(); return laddr ? laddr : daddr; } /** * tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections * @skb: The skb being processed. * @tproto: Transport protocol. * @thoff: Transport protocol header offset. * @par: Iptables target parameters. * @sk: The TIME_WAIT TCP socket found by the lookup. * * We have to handle SYN packets arriving to TIME_WAIT sockets * differently: instead of reopening the connection we should rather * redirect the new connection to the proxy if there's a listener * socket present. * * tproxy_handle_time_wait6() consumes the socket reference passed in. * * Returns the listener socket if there's one, the TIME_WAIT socket if * no such listener is found, or NULL if the TCP header is incomplete. 
*/ static struct sock * tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, const struct xt_action_param *par, struct sock *sk) { const struct ipv6hdr *iph = ipv6_hdr(skb); struct tcphdr _hdr, *hp; const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { inet_twsk_put(inet_twsk(sk)); return NULL; } if (hp->syn && !hp->rst && !hp->ack && !hp->fin) { /* SYN to a TIME_WAIT socket, we'd rather redirect it * to a listener socket if there's one */ struct sock *sk2; sk2 = nf_tproxy_get_sock_v6(par->net, tproto, &iph->saddr, tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr), hp->source, tgi->lport ? tgi->lport : hp->dest, skb->dev, NFT_LOOKUP_LISTENER); if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; } } return sk; } static unsigned int tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct ipv6hdr *iph = ipv6_hdr(skb); const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; struct udphdr _hdr, *hp; struct sock *sk; const struct in6_addr *laddr; __be16 lport; int thoff = 0; int tproto; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { pr_debug("unable to find transport header in IPv6 packet, dropping\n"); return NF_DROP; } hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr); if (hp == NULL) { pr_debug("unable to grab transport header contents in IPv6 packet, dropping\n"); return NF_DROP; } /* check if there's an ongoing connection on the packet * addresses, this happens if the redirect already happened * and the current packet belongs to an already established * connection */ sk = nf_tproxy_get_sock_v6(par->net, tproto, &iph->saddr, &iph->daddr, hp->source, hp->dest, par->in, NFT_LOOKUP_ESTABLISHED); laddr = tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr); lport = tgi->lport ? 
tgi->lport : hp->dest; /* UDP has no TCP_TIME_WAIT state, so we never enter here */ if (sk && sk->sk_state == TCP_TIME_WAIT) /* reopening a TIME_WAIT connection needs special handling */ sk = tproxy_handle_time_wait6(skb, tproto, thoff, par, sk); else if (!sk) /* no there's no established connection, check if * there's a listener on the redirected addr/port */ sk = nf_tproxy_get_sock_v6(par->net, tproto, &iph->saddr, laddr, hp->source, lport, par->in, NFT_LOOKUP_LISTENER); /* NOTE: assign_sock consumes our sk reference */ if (sk && tproxy_sk_is_transparent(sk)) { /* This should be in a separate target, but we don't do multiple targets on the same rule yet */ skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), laddr, ntohs(lport), skb->mark); nf_tproxy_assign_sock(skb, sk); return NF_ACCEPT; } pr_debug("no socket, dropping: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", tproto, &iph->saddr, ntohs(hp->source), &iph->daddr, ntohs(hp->dest), skb->mark); return NF_DROP; } static int tproxy_tg6_check(const struct xt_tgchk_param *par) { const struct ip6t_ip6 *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IP6T_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } #endif static int tproxy_tg4_check(const struct xt_tgchk_param *par) { const struct ipt_ip *i = par->entryinfo; if ((i->proto == IPPROTO_TCP || i->proto == IPPROTO_UDP) && !(i->invflags & IPT_INV_PROTO)) return 0; pr_info("Can be used only in combination with " "either -p tcp or -p udp\n"); return -EINVAL; } static struct xt_target tproxy_tg_reg[] __read_mostly = { { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v0, .revision = 0, .targetsize = sizeof(struct xt_tproxy_target_info), .checkentry = tproxy_tg4_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me 
= THIS_MODULE, }, { .name = "TPROXY", .family = NFPROTO_IPV4, .table = "mangle", .target = tproxy_tg4_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg4_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #ifdef XT_TPROXY_HAVE_IPV6 { .name = "TPROXY", .family = NFPROTO_IPV6, .table = "mangle", .target = tproxy_tg6_v1, .revision = 1, .targetsize = sizeof(struct xt_tproxy_target_info_v1), .checkentry = tproxy_tg6_check, .hooks = 1 << NF_INET_PRE_ROUTING, .me = THIS_MODULE, }, #endif }; static int __init tproxy_tg_init(void) { nf_defrag_ipv4_enable(); #ifdef XT_TPROXY_HAVE_IPV6 nf_defrag_ipv6_enable(); #endif return xt_register_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } static void __exit tproxy_tg_exit(void) { xt_unregister_targets(tproxy_tg_reg, ARRAY_SIZE(tproxy_tg_reg)); } module_init(tproxy_tg_init); module_exit(tproxy_tg_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Balazs Scheidler, Krisztian Kovacs"); MODULE_DESCRIPTION("Netfilter transparent proxy (TPROXY) target module."); MODULE_ALIAS("ipt_TPROXY"); MODULE_ALIAS("ip6t_TPROXY");
gpl-2.0
gic4107/HSA-linux
drivers/acpi/acpica/evregion.c
198
22668
/****************************************************************************** * * Module Name: evregion - Operation Region support * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #include "acinterp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evregion") extern u8 acpi_gbl_default_address_spaces[]; /* Local prototypes */ static void acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node); static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, u32 level, void *context, void **return_value); /******************************************************************************* * * FUNCTION: acpi_ev_initialize_op_regions * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Execute _REG methods for all Operation Regions that have * an installed default region handler. * ******************************************************************************/ acpi_status acpi_ev_initialize_op_regions(void) { acpi_status status; u32 i; ACPI_FUNCTION_TRACE(ev_initialize_op_regions); status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Run the _REG methods for op_regions in each default address space */ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) { /* * Make sure the installed handler is the DEFAULT handler. 
If not the * default, the _REG methods will have already been run (when the * handler was installed) */ if (acpi_ev_has_default_handler(acpi_gbl_root_node, acpi_gbl_default_address_spaces [i])) { status = acpi_ev_execute_reg_methods(acpi_gbl_root_node, acpi_gbl_default_address_spaces [i]); } } acpi_gbl_reg_methods_executed = TRUE; (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_address_space_dispatch * * PARAMETERS: region_obj - Internal region object * field_obj - Corresponding field. Can be NULL. * function - Read or Write operation * region_offset - Where in the region to read or write * bit_width - Field width in bits (8, 16, 32, or 64) * value - Pointer to in or out value, must be * a full 64-bit integer * * RETURN: Status * * DESCRIPTION: Dispatch an address space or operation region access to * a previously installed handler. * ******************************************************************************/ acpi_status acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, union acpi_operand_object *field_obj, u32 function, u32 region_offset, u32 bit_width, u64 *value) { acpi_status status; acpi_adr_space_handler handler; acpi_adr_space_setup region_setup; union acpi_operand_object *handler_desc; union acpi_operand_object *region_obj2; void *region_context = NULL; struct acpi_connection_info *context; ACPI_FUNCTION_TRACE(ev_address_space_dispatch); region_obj2 = acpi_ns_get_secondary_object(region_obj); if (!region_obj2) { return_ACPI_STATUS(AE_NOT_EXIST); } /* Ensure that there is a handler associated with this region */ handler_desc = region_obj->region.handler; if (!handler_desc) { ACPI_ERROR((AE_INFO, "No handler for Region [%4.4s] (%p) [%s]", acpi_ut_get_node_name(region_obj->region.node), region_obj, acpi_ut_get_region_name(region_obj->region. 
space_id))); return_ACPI_STATUS(AE_NOT_EXIST); } context = handler_desc->address_space.context; /* * It may be the case that the region has never been initialized. * Some types of regions require special init code */ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { /* This region has not been initialized yet, do it */ region_setup = handler_desc->address_space.setup; if (!region_setup) { /* No initialization routine, exit with error */ ACPI_ERROR((AE_INFO, "No init routine for region(%p) [%s]", region_obj, acpi_ut_get_region_name(region_obj->region. space_id))); return_ACPI_STATUS(AE_NOT_EXIST); } /* * We must exit the interpreter because the region setup will * potentially execute control methods (for example, the _REG method * for this region) */ acpi_ex_exit_interpreter(); status = region_setup(region_obj, ACPI_REGION_ACTIVATE, context, &region_context); /* Re-enter the interpreter */ acpi_ex_enter_interpreter(); /* Check for failure of the Region Setup */ if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "During region initialization: [%s]", acpi_ut_get_region_name(region_obj-> region. space_id))); return_ACPI_STATUS(status); } /* Region initialization may have been completed by region_setup */ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) { region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE; /* * Save the returned context for use in all accesses to * the handler for this particular region */ if (!(region_obj2->extra.region_context)) { region_obj2->extra.region_context = region_context; } } } /* We have everything we need, we can invoke the address space handler */ handler = handler_desc->address_space.handler; ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", &region_obj->region.handler->address_space, handler, ACPI_FORMAT_NATIVE_UINT(region_obj->region.address + region_offset), acpi_ut_get_region_name(region_obj->region. 
space_id))); /* * Special handling for generic_serial_bus and general_purpose_io: * There are three extra parameters that must be passed to the * handler via the context: * 1) Connection buffer, a resource template from Connection() op. * 2) Length of the above buffer. * 3) Actual access length from the access_as() op. */ if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) || (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) && context && field_obj) { /* Get the Connection (resource_template) buffer */ context->connection = field_obj->field.resource_buffer; context->length = field_obj->field.resource_length; context->access_length = field_obj->field.access_length; } if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* * For handlers other than the default (supplied) handlers, we must * exit the interpreter because the handler *might* block -- we don't * know what it will do, so we can't hold the lock on the intepreter. */ acpi_ex_exit_interpreter(); } /* Call the handler */ status = handler(function, (region_obj->region.address + region_offset), bit_width, value, context, region_obj2->extra.region_context); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]", acpi_ut_get_region_name(region_obj->region. space_id))); } if (!(handler_desc->address_space.handler_flags & ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) { /* * We just returned from a non-default handler, we must re-enter the * interpreter */ acpi_ex_enter_interpreter(); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_detach_region * * PARAMETERS: region_obj - Region Object * acpi_ns_is_locked - Namespace Region Already Locked? * * RETURN: None * * DESCRIPTION: Break the association between the handler and the region * this is a two way association. 
* ******************************************************************************/ void acpi_ev_detach_region(union acpi_operand_object *region_obj, u8 acpi_ns_is_locked) { union acpi_operand_object *handler_obj; union acpi_operand_object *obj_desc; union acpi_operand_object **last_obj_ptr; acpi_adr_space_setup region_setup; void **region_context; union acpi_operand_object *region_obj2; acpi_status status; ACPI_FUNCTION_TRACE(ev_detach_region); region_obj2 = acpi_ns_get_secondary_object(region_obj); if (!region_obj2) { return_VOID; } region_context = &region_obj2->extra.region_context; /* Get the address handler from the region object */ handler_obj = region_obj->region.handler; if (!handler_obj) { /* This region has no handler, all done */ return_VOID; } /* Find this region in the handler's list */ obj_desc = handler_obj->address_space.region_list; last_obj_ptr = &handler_obj->address_space.region_list; while (obj_desc) { /* Is this the correct Region? */ if (obj_desc == region_obj) { ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Removing Region %p from address handler %p\n", region_obj, handler_obj)); /* This is it, remove it from the handler's list */ *last_obj_ptr = obj_desc->region.next; obj_desc->region.next = NULL; /* Must clear field */ if (acpi_ns_is_locked) { status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_VOID; } } /* Now stop region accesses by executing the _REG method */ status = acpi_ev_execute_reg_method(region_obj, ACPI_REG_DISCONNECT); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "from region _REG, [%s]", acpi_ut_get_region_name (region_obj->region.space_id))); } if (acpi_ns_is_locked) { status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_VOID; } } /* * If the region has been activated, call the setup handler with * the deactivate notification */ if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) { region_setup = handler_obj->address_space.setup; status = 
region_setup(region_obj, ACPI_REGION_DEACTIVATE, handler_obj->address_space. context, region_context); /* * region_context should have been released by the deactivate * operation. We don't need access to it anymore here. */ if (region_context) { *region_context = NULL; } /* Init routine may fail, Just ignore errors */ if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "from region handler - deactivate, [%s]", acpi_ut_get_region_name (region_obj->region. space_id))); } region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); } /* * Remove handler reference in the region * * NOTE: this doesn't mean that the region goes away, the region * is just inaccessible as indicated to the _REG method * * If the region is on the handler's list, this must be the * region's handler */ region_obj->region.handler = NULL; acpi_ut_remove_reference(handler_obj); return_VOID; } /* Walk the linked list of handlers */ last_obj_ptr = &obj_desc->region.next; obj_desc = obj_desc->region.next; } /* If we get here, the region was not in the handler's region list */ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Cannot remove region %p from address handler %p\n", region_obj, handler_obj)); return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ev_attach_region * * PARAMETERS: handler_obj - Handler Object * region_obj - Region Object * acpi_ns_is_locked - Namespace Region Already Locked? * * RETURN: None * * DESCRIPTION: Create the association between the handler and the region * this is a two way association. 
* ******************************************************************************/ acpi_status acpi_ev_attach_region(union acpi_operand_object *handler_obj, union acpi_operand_object *region_obj, u8 acpi_ns_is_locked) { ACPI_FUNCTION_TRACE(ev_attach_region); ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Adding Region [%4.4s] %p to address handler %p [%s]\n", acpi_ut_get_node_name(region_obj->region.node), region_obj, handler_obj, acpi_ut_get_region_name(region_obj->region. space_id))); /* Link this region to the front of the handler's list */ region_obj->region.next = handler_obj->address_space.region_list; handler_obj->address_space.region_list = region_obj; /* Install the region's handler */ if (region_obj->region.handler) { return_ACPI_STATUS(AE_ALREADY_EXISTS); } region_obj->region.handler = handler_obj; acpi_ut_add_reference(handler_obj); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ev_execute_reg_method * * PARAMETERS: region_obj - Region object * function - Passed to _REG: On (1) or Off (0) * * RETURN: Status * * DESCRIPTION: Execute _REG method for a region * ******************************************************************************/ acpi_status acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function) { struct acpi_evaluate_info *info; union acpi_operand_object *args[3]; union acpi_operand_object *region_obj2; acpi_status status; ACPI_FUNCTION_TRACE(ev_execute_reg_method); region_obj2 = acpi_ns_get_secondary_object(region_obj); if (!region_obj2) { return_ACPI_STATUS(AE_NOT_EXIST); } if (region_obj2->extra.method_REG == NULL) { return_ACPI_STATUS(AE_OK); } /* Allocate and initialize the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { return_ACPI_STATUS(AE_NO_MEMORY); } info->prefix_node = region_obj2->extra.method_REG; info->relative_pathname = NULL; info->parameters = args; info->flags = 
ACPI_IGNORE_RETURN_VALUE; /* * The _REG method has two arguments: * * arg0 - Integer: * Operation region space ID Same value as region_obj->Region.space_id * * arg1 - Integer: * connection status 1 for connecting the handler, 0 for disconnecting * the handler (Passed as a parameter) */ args[0] = acpi_ut_create_integer_object((u64)region_obj->region.space_id); if (!args[0]) { status = AE_NO_MEMORY; goto cleanup1; } args[1] = acpi_ut_create_integer_object((u64)function); if (!args[1]) { status = AE_NO_MEMORY; goto cleanup2; } args[2] = NULL; /* Terminate list */ /* Execute the method, no return value */ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, info->prefix_node, NULL)); status = acpi_ns_evaluate(info); acpi_ut_remove_reference(args[1]); cleanup2: acpi_ut_remove_reference(args[0]); cleanup1: ACPI_FREE(info); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_execute_reg_methods * * PARAMETERS: node - Namespace node for the device * space_id - The address space ID * * RETURN: Status * * DESCRIPTION: Run all _REG methods for the input Space ID; * Note: assumes namespace is locked, or system init time. * ******************************************************************************/ acpi_status acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, acpi_adr_space_type space_id) { acpi_status status; ACPI_FUNCTION_TRACE(ev_execute_reg_methods); /* * Run all _REG methods for all Operation Regions for this space ID. This * is a separate walk in order to handle any interdependencies between * regions and _REG methods. (i.e. 
handlers must be installed for all * regions of this Space ID before we can run any _REG methods) */ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, NULL, &space_id, NULL); /* Special case for EC: handle "orphan" _REG methods with no region */ if (space_id == ACPI_ADR_SPACE_EC) { acpi_ev_orphan_ec_reg_method(node); } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ev_reg_run * * PARAMETERS: walk_namespace callback * * DESCRIPTION: Run _REG method for region objects of the requested spaceID * ******************************************************************************/ static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, u32 level, void *context, void **return_value) { union acpi_operand_object *obj_desc; struct acpi_namespace_node *node; acpi_adr_space_type space_id; acpi_status status; space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context); /* Convert and validate the device handle */ node = acpi_ns_validate_handle(obj_handle); if (!node) { return (AE_BAD_PARAMETER); } /* * We only care about regions.and objects that are allowed to have address * space handlers */ if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) { return (AE_OK); } /* Check for an existing internal object */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { /* No object, just exit */ return (AE_OK); } /* Object is a Region */ if (obj_desc->region.space_id != space_id) { /* This region is for a different address space, just ignore it */ return (AE_OK); } status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT); return (status); } /******************************************************************************* * * FUNCTION: acpi_ev_orphan_ec_reg_method * * PARAMETERS: ec_device_node - Namespace node for an EC device * * RETURN: None * * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC * 
device. This is a _REG method that has no corresponding region * within the EC device scope. The orphan _REG method appears to * have been enabled by the description of the ECDT in the ACPI * specification: "The availability of the region space can be * detected by providing a _REG method object underneath the * Embedded Controller device." * * To quickly access the EC device, we use the ec_device_node used * during EC handler installation. Otherwise, we would need to * perform a time consuming namespace walk, executing _HID * methods to find the EC device. * * MUTEX: Assumes the namespace is locked * ******************************************************************************/ static void acpi_ev_orphan_ec_reg_method(struct acpi_namespace_node *ec_device_node) { acpi_handle reg_method; struct acpi_namespace_node *next_node; acpi_status status; struct acpi_object_list args; union acpi_object objects[2]; ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method); if (!ec_device_node) { return_VOID; } /* Namespace is currently locked, must release */ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); /* Get a handle to a _REG method immediately under the EC device */ status = acpi_get_handle(ec_device_node, METHOD_NAME__REG, &reg_method); if (ACPI_FAILURE(status)) { goto exit; /* There is no _REG method present */ } /* * Execute the _REG method only if there is no Operation Region in * this scope with the Embedded Controller space ID. Otherwise, it * will already have been executed. Note, this allows for Regions * with other space IDs to be present; but the code below will then * execute the _REG method with the embedded_control space_ID argument. 
*/ next_node = acpi_ns_get_next_node(ec_device_node, NULL); while (next_node) { if ((next_node->type == ACPI_TYPE_REGION) && (next_node->object) && (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) { goto exit; /* Do not execute the _REG */ } next_node = acpi_ns_get_next_node(ec_device_node, next_node); } /* Evaluate the _REG(embedded_control,Connect) method */ args.count = 2; args.pointer = objects; objects[0].type = ACPI_TYPE_INTEGER; objects[0].integer.value = ACPI_ADR_SPACE_EC; objects[1].type = ACPI_TYPE_INTEGER; objects[1].integer.value = ACPI_REG_CONNECT; status = acpi_evaluate_object(reg_method, NULL, &args, NULL); exit: /* We ignore all errors from above, don't care */ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); return_VOID; }
gpl-2.0
scoty755/Sense7_Kernel_b2wlj
drivers/mtd/devices/msm_qpic_nand.c
1478
80601
/* * Copyright (C) 2007 Google, Inc. * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/crc16.h> #include <linux/bitrev.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/ctype.h> #include <mach/sps.h> #include <mach/msm_smem.h> #define PAGE_SIZE_2K 2048 #define PAGE_SIZE_4K 4096 #define WRITE 1 #define READ 0 /* * The maximum no of descriptors per transfer (page read/write) won't be more * than 64. For more details on what those commands are, please refer to the * page read and page write functions in the driver. */ #define SPS_MAX_DESC_NUM 64 #define SPS_DATA_CONS_PIPE_INDEX 0 #define SPS_DATA_PROD_PIPE_INDEX 1 #define SPS_CMD_CONS_PIPE_INDEX 2 #define msm_virt_to_dma(chip, vaddr) \ ((chip)->dma_phys_addr + \ ((uint8_t *)(vaddr) - (chip)->dma_virt_addr)) /* * A single page read/write request would typically need DMA memory of about * 1K memory approximately. So for a single request this memory is more than * enough. * * But to accommodate multiple clients we allocate 8K of memory. Though only * one client request can be submitted to NANDc at any time, other clients can * still prepare the descriptors while waiting for current client request to * be done. 
Thus for a total memory of 8K, the driver can currently support * maximum clients up to 7 or 8 at a time. The client for which there is no * free DMA memory shall wait on the wait queue until other clients free up * the required memory. */ #define MSM_NAND_DMA_BUFFER_SIZE SZ_8K /* * This defines the granularity at which the buffer management is done. The * total number of slots is based on the size of the atomic_t variable * dma_buffer_busy(number of bits) within the structure msm_nand_chip. */ #define MSM_NAND_DMA_BUFFER_SLOT_SZ \ (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8)) /* ONFI(Open NAND Flash Interface) parameters */ #define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800 #define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000 #define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d #define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d #define ONFI_PARAM_INFO_LENGTH 0x0200 #define ONFI_PARAM_PAGE_LENGTH 0x0100 #define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F #define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20 #define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC #define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00 #define FLASH_READ_DEVICE_ID_ADDRESS 0x00 #define MSM_NAND_RESET_FLASH_STS 0x00000020 #define MSM_NAND_RESET_READ_STS 0x000000C0 /* QPIC NANDc (NAND Controller) Register Set */ #define MSM_NAND_REG(info, off) (info->nand_phys + off) #define MSM_NAND_QPIC_VERSION(info) MSM_NAND_REG(info, 0x20100) #define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000) #define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004) #define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008) #define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010) #define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014) #define FS_OP_ERR (1 << 4) #define FS_MPU_ERR (1 << 8) #define FS_DEVICE_STS_ERR (1 << 16) #define FS_DEVICE_WP (1 << 23) #define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018) #define BS_UNCORRECTABLE_BIT (1 << 8) #define BS_CORRECTABLE_ERR_MSK 0x1F #define 
MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020) #define DISABLE_STATUS_AFTER_WRITE 4 #define CW_PER_PAGE 6 #define UD_SIZE_BYTES 9 #define SPARE_SIZE_BYTES 23 #define NUM_ADDR_CYCLES 27 #define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024) #define DEV0_CFG1_ECC_DISABLE 0 #define WIDE_FLASH 1 #define NAND_RECOVERY_CYCLES 2 #define CS_ACTIVE_BSY 5 #define BAD_BLOCK_BYTE_NUM 6 #define BAD_BLOCK_IN_SPARE_AREA 16 #define WR_RD_BSY_GAP 17 #define ENABLE_BCH_ECC 27 #define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028) #define ECC_CFG_ECC_DISABLE 0 #define ECC_SW_RESET 1 #define ECC_MODE 4 #define ECC_PARITY_SIZE_BYTES 8 #define ECC_NUM_DATA_BYTES 16 #define ECC_FORCE_CLK_OPEN 30 #define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040) #define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044) #define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4) #define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC) #define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0) #define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8) #define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC) #define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00) #define BAM_MODE_EN 0 #define MSM_NAND_VERSION(info) MSM_NAND_REG(info, 0x30F08) #define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20) #define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24) /* device commands */ #define MSM_NAND_CMD_PAGE_READ 0x32 #define MSM_NAND_CMD_PAGE_READ_ECC 0x33 #define MSM_NAND_CMD_PAGE_READ_ALL 0x34 #define MSM_NAND_CMD_PRG_PAGE 0x36 #define MSM_NAND_CMD_PRG_PAGE_ECC 0x37 #define MSM_NAND_CMD_PRG_PAGE_ALL 0x39 #define MSM_NAND_CMD_BLOCK_ERASE 0x3A #define MSM_NAND_CMD_FETCH_ID 0x0B /* Version Mask */ #define MSM_NAND_VERSION_MAJOR_MASK 0xF0000000 #define MSM_NAND_VERSION_MAJOR_SHIFT 28 #define MSM_NAND_VERSION_MINOR_MASK 0x0FFF0000 #define MSM_NAND_VERSION_MINOR_SHIFT 16 /* Structure that defines a NAND SPS command 
element */
struct msm_nand_sps_cmd {
	struct sps_command_element ce;
	uint32_t flags;	/* SPS_IOVEC_FLAG_* for this command element */
};

/*
 * Structure that defines the NAND controller properties as per the
 * NAND flash device/chip that is attached.
 */
struct msm_nand_chip {
	struct device *dev;
	/*
	 * DMA memory will be allocated only once during probe and this memory
	 * will be used by all NAND clients. This wait queue is needed to
	 * make the applications wait for DMA memory to be free'd when the
	 * complete memory is exhausted.
	 */
	wait_queue_head_t dma_wait_queue;
	/* Bitmap of in-use DMA buffer slots, one bit per slot */
	atomic_t dma_buffer_busy;
	uint8_t *dma_virt_addr;
	dma_addr_t dma_phys_addr;
	uint32_t ecc_parity_bytes;
	uint32_t bch_caps; /* Controller BCH ECC capabilities */
#define MSM_NAND_CAP_4_BIT_BCH      (1 << 0)
#define MSM_NAND_CAP_8_BIT_BCH      (1 << 1)
	/* Raw codeword size in bytes (used for MTD_OPS_RAW transfers) */
	uint32_t cw_size;
	/* NANDc register configurations */
	uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
	uint32_t ecc_buf_cfg;
	uint32_t ecc_bch_cfg;
};

/* Structure that defines an SPS end point for a NANDc BAM pipe. */
struct msm_nand_sps_endpt {
	struct sps_pipe *handle;
	struct sps_connect config;
	struct sps_register_event event;
	struct completion completion;	/* signalled on pipe interrupt */
};

/*
 * Structure that defines NANDc SPS data - BAM handle and an end point
 * for each BAM pipe.
 */
struct msm_nand_sps_info {
	uint32_t bam_handle;
	struct msm_nand_sps_endpt data_prod;	/* device -> memory (reads) */
	struct msm_nand_sps_endpt data_cons;	/* memory -> device (writes) */
	struct msm_nand_sps_endpt cmd_pipe;	/* register command elements */
};

/*
 * Structure that contains flash device information. This gets updated after
 * the NAND flash device detection.
 */
struct flash_identification {
	uint32_t flash_id;
	uint32_t density;
	uint32_t widebus;
	uint32_t pagesize;
	uint32_t blksize;
	uint32_t oobsize;
	uint32_t ecc_correctability;
};

/* Structure that defines NANDc private data. */
struct msm_nand_info {
	struct mtd_info		mtd;
	struct msm_nand_chip	nand_chip;
	struct msm_nand_sps_info sps;
	unsigned long bam_phys;
	unsigned long nand_phys;
	void __iomem *bam_base;
	int bam_irq;
	/*
	 * This lock must be acquired before submitting any command or data
	 * descriptors to BAM pipes and must be held until all the submitted
	 * descriptors are processed.
	 *
	 * This is required to ensure that both command and data descriptors
	 * are submitted atomically without interruption from other clients,
	 * when there are requests from more than one client at any time.
	 * Otherwise, data and command descriptors can be submitted out of
	 * order for a request which can cause data corruption.
	 */
	struct mutex bam_lock;
	struct flash_identification flash_dev;
};

/*
 * Structure that defines an ONFI parameter page (one 256B copy; see
 * ONFI_PARAM_PAGE_LENGTH). The last field holds the CRC over the
 * preceding bytes.
 */
struct onfi_param_page {
	uint32_t parameter_page_signature;
	uint16_t revision_number;
	uint16_t features_supported;
	uint16_t optional_commands_supported;
	uint8_t  reserved0[22];
	uint8_t  device_manufacturer[12];
	uint8_t  device_model[20];
	uint8_t  jedec_manufacturer_id;
	uint16_t date_code;
	uint8_t  reserved1[13];
	uint32_t number_of_data_bytes_per_page;
	uint16_t number_of_spare_bytes_per_page;
	uint32_t number_of_data_bytes_per_partial_page;
	uint16_t number_of_spare_bytes_per_partial_page;
	uint32_t number_of_pages_per_block;
	uint32_t number_of_blocks_per_logical_unit;
	uint8_t  number_of_logical_units;
	uint8_t  number_of_address_cycles;
	uint8_t  number_of_bits_per_cell;
	uint16_t maximum_bad_blocks_per_logical_unit;
	uint16_t block_endurance;
	uint8_t  guaranteed_valid_begin_blocks;
	uint16_t guaranteed_valid_begin_blocks_endurance;
	uint8_t  number_of_programs_per_page;
	uint8_t  partial_program_attributes;
	uint8_t  number_of_bits_ecc_correctability;
	uint8_t  number_of_interleaved_address_bits;
	uint8_t  interleaved_operation_attributes;
	uint8_t  reserved2[13];
	uint8_t  io_pin_capacitance;
	uint16_t timing_mode_support;
	uint16_t program_cache_timing_mode_support;
	uint16_t maximum_page_programming_time;
	uint16_t maximum_block_erase_time;
	uint16_t maximum_page_read_time;
	uint16_t maximum_change_column_setup_time;
	uint8_t  reserved3[23];
	uint16_t vendor_specific_revision_number;
	uint8_t  vendor_specific[88];
	uint16_t integrity_crc;
} __attribute__((__packed__));

/* On-flash partition table magics and limits */
#define FLASH_PART_MAGIC1	0x55EE73AA
#define FLASH_PART_MAGIC2	0xE35EBDDB
#define FLASH_PTABLE_V3		3
#define FLASH_PTABLE_V4		4
#define FLASH_PTABLE_MAX_PARTS_V3 16
#define FLASH_PTABLE_MAX_PARTS_V4 32
#define FLASH_PTABLE_HDR_LEN (4*sizeof(uint32_t))
#define FLASH_PTABLE_ENTRY_NAME_SIZE 16

struct flash_partition_entry {
	char name[FLASH_PTABLE_ENTRY_NAME_SIZE];
	u32 offset;     /* Offset in blocks from beginning of device */
	u32 length;     /* Length of the partition in blocks */
	u8 attr;	/* Flags for this partition */
};

struct flash_partition_table {
	u32 magic1;
	u32 magic2;
	u32 version;
	u32 numparts;
	struct flash_partition_entry part_entry[FLASH_PTABLE_MAX_PARTS_V4];
};

static struct flash_partition_table ptable;

static struct mtd_partition mtd_part[FLASH_PTABLE_MAX_PARTS_V4];

/*
 * Get the DMA memory for requested amount of size. It returns the pointer
 * to free memory available from the allocated pool. Returns NULL if there
 * is no free memory.
 */
static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
{
	uint32_t bitmask, free_bitmask, old_bitmask;
	uint32_t need_mask, current_need_mask;
	int free_index;

	/* Mask of contiguous slots needed to hold @size bytes */
	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
			- 1;
	bitmask = atomic_read(&chip->dma_buffer_busy);
	free_bitmask = ~bitmask;
	do {
		/* Try the lowest free slot as the start of the run */
		free_index = __ffs(free_bitmask);
		current_need_mask = need_mask << free_index;

		/* Run would extend past the end of the pool */
		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
						MSM_NAND_DMA_BUFFER_SIZE)
			return NULL;

		if ((bitmask & current_need_mask) == 0) {
			/* Whole run is free: claim it atomically */
			old_bitmask =
				atomic_cmpxchg(&chip->dma_buffer_busy,
					       bitmask,
					       bitmask | current_need_mask);
			if (old_bitmask == bitmask)
				return chip->dma_virt_addr +
				free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
			/* Lost the race with another claimant */
			free_bitmask = 0;/* force return */
		}
		/* current free range was too small, clear all free bits */
		/* below the top busy bit within current_need_mask */
		free_bitmask &=
			~(~0U >> (32 - fls(bitmask & current_need_mask)));
	} while (free_bitmask);

	return NULL;
}

/*
 * Releases the DMA memory used to the free pool and also wakes up any user
 * thread waiting on wait queue for free memory to be available.
 */
static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
					void *buffer, size_t size)
{
	int index;
	uint32_t used_mask;

	/* Recompute the slot run that msm_nand_get_dma_buffer() claimed */
	used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
			- 1;
	index = ((uint8_t *)buffer - chip->dma_virt_addr) /
		MSM_NAND_DMA_BUFFER_SLOT_SZ;
	atomic_sub(used_mask << index, &chip->dma_buffer_busy);

	wake_up(&chip->dma_wait_queue);
}

/*
 * Calculates page address of the buffer passed, offset of buffer within
 * that page and then maps it for DMA by calling dma_map_page().
*/ static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size, enum dma_data_direction dir) { struct page *page; unsigned long offset = (unsigned long)addr & ~PAGE_MASK; if (virt_addr_valid(addr)) page = virt_to_page(addr); else { if (WARN_ON(size + offset > PAGE_SIZE)) return ~0; page = vmalloc_to_page(addr); } return dma_map_page(dev, page, offset, size, dir); } /* * Wrapper function to prepare a SPS command element with the data that is * passed to this function. * * Since for any command element it is a must to have this flag * SPS_IOVEC_FLAG_CMD, this function by default updates this flag for a * command element that is passed and thus, the caller need not explicilty * pass this flag. The other flags must be passed based on the need. If a * command element doesn't have any other flag, then 0 can be passed to flags. */ static inline void msm_nand_prep_ce(struct msm_nand_sps_cmd *sps_cmd, uint32_t addr, uint32_t command, uint32_t data, uint32_t flags) { struct sps_command_element *cmd = &sps_cmd->ce; cmd->addr = addr; cmd->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND : (uint32_t) SPS_READ_COMMAND; cmd->data = data; cmd->mask = 0xFFFFFFFF; sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags; } /* * Read a single NANDc register as mentioned by its parameter addr. The return * value indicates whether read is successful or not. The register value read * is stored in val. 
 */
static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
				uint32_t *val)
{
	int ret = 0;
	struct msm_nand_sps_cmd *cmd;
	struct msm_nand_chip *chip = &info->nand_chip;
	/* DMA-able scratch: one command element plus the read-back word */
	struct {
		struct msm_nand_sps_cmd cmd;
		uint32_t data;
	} *dma_buffer;

	/* Sleep until a slot in the shared DMA pool frees up */
	wait_event(chip->dma_wait_queue,
		   (dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer))));

	cmd = &dma_buffer->cmd;
	/* Single register-read element; interrupt when it completes */
	msm_nand_prep_ce(cmd, addr, READ, msm_virt_to_dma(chip,
			&dma_buffer->data), SPS_IOVEC_FLAG_INT);

	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
			msm_virt_to_dma(chip, &cmd->ce),
			sizeof(struct sps_command_element), NULL, cmd->flags);
	if (ret) {
		pr_err("failed to submit command %x ret %d\n", addr, ret);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	*val = dma_buffer->data;
out:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return ret;
}

/*
 * Read the Flash ID from the Nand Flash Device. The return value < 0
 * indicates failure. When successful, the Flash ID is stored in parameter
 * read_id.
 */
static int msm_nand_flash_read_id(struct msm_nand_info *info,
		bool read_onfi_signature,
		uint32_t *read_id)
{
	int err = 0, i;
	struct msm_nand_sps_cmd *cmd;
	struct sps_iovec *iovec;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t total_cnt = 4;
	/*
	 * The following 4 commands are required to read id -
	 * write commands - addr0, flash, exec
	 * read_commands - read_id
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t data[total_cnt];
	} *dma_buffer;

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));
	/* Column address selects ONFI signature vs. plain device ID */
	if (read_onfi_signature)
		dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
	else
		dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;

	dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
	dma_buffer->data[2] = 1;	/* EXEC_CMD trigger value */
	dma_buffer->data[3] = 0xeeeeeeee;	/* poison; overwritten by HW */

	cmd = dma_buffer->cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
			dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
			dma_buffer->data[1], 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_READ_ID(info), READ,
		msm_virt_to_dma(chip, &dma_buffer->data[3]),
		SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;

	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;

	/* Build one iovec per prepared command element */
	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->bam_lock);
	err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (err) {
		pr_err("Failed to submit commands %d\n", err);
		mutex_unlock(&info->bam_lock);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	mutex_unlock(&info->bam_lock);

	pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
	if (!read_onfi_signature)
		pr_debug("nandid: %x maker %02x device %02x\n",
		       dma_buffer->data[3], dma_buffer->data[3] & 0xff,
		       (dma_buffer->data[3] >> 8) & 0xff);
	*read_id = dma_buffer->data[3];
out:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return err;
}

/*
 * Contains data for common configuration registers that must be programmed
 * for every NANDc operation.
 */
struct msm_nand_common_cfgs {
	uint32_t cmd;
	uint32_t addr0;
	uint32_t addr1;
	uint32_t cfg0;
	uint32_t cfg1;
};

/*
 * Function to prepare SPS command elements to write into NANDc configuration
 * registers as per the data defined in struct msm_nand_common_cfgs. This is
 * required for the following NANDc operations - Erase, Bad Block checking
 * and for reading ONFI parameter page.
 */
static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
				struct msm_nand_common_cfgs data,
				struct msm_nand_sps_cmd **curr_cmd)
{
	struct msm_nand_sps_cmd *cmd;

	cmd = *curr_cmd;
	/* First element takes the pipe lock for the whole sequence */
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data.cmd,
			SPS_IOVEC_FLAG_LOCK);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE, data.addr1, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, data.cfg0, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, data.cfg1, 0);
	cmd++;
	*curr_cmd = cmd;
}

/*
 * Function to check the CRC integrity check on ONFI parameter page read.
 * For ONFI parameter page read, the controller ECC will be disabled. Hence,
 * it is mandatory to manually compute CRC and check it against the value
 * stored within ONFI page.
*/ static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count) { int i; uint16_t result; for (i = 0; i < count; i++) buffer[i] = bitrev8(buffer[i]); result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count)); for (i = 0; i < count; i++) buffer[i] = bitrev8(buffer[i]); return result; } /* * Structure that contains NANDc register data for commands required * for reading ONFI paramter page. */ struct msm_nand_flash_onfi_data { struct msm_nand_common_cfgs cfg; uint32_t exec; uint32_t devcmd1_orig; uint32_t devcmdvld_orig; uint32_t devcmd1_mod; uint32_t devcmdvld_mod; uint32_t ecc_bch_cfg; }; struct version { uint16_t nand_major; uint16_t nand_minor; uint16_t qpic_major; uint16_t qpic_minor; }; static int msm_nand_version_check(struct msm_nand_info *info, struct version *nandc_version) { uint32_t qpic_ver = 0, nand_ver = 0; int err = 0; /* Lookup the version to identify supported features */ err = msm_nand_flash_rd_reg(info, MSM_NAND_VERSION(info), &nand_ver); if (err) { pr_err("Failed to read NAND_VERSION, err=%d\n", err); goto out; } nandc_version->nand_major = (nand_ver & MSM_NAND_VERSION_MAJOR_MASK) >> MSM_NAND_VERSION_MAJOR_SHIFT; nandc_version->nand_minor = (nand_ver & MSM_NAND_VERSION_MINOR_MASK) >> MSM_NAND_VERSION_MINOR_SHIFT; err = msm_nand_flash_rd_reg(info, MSM_NAND_QPIC_VERSION(info), &qpic_ver); if (err) { pr_err("Failed to read QPIC_VERSION, err=%d\n", err); goto out; } nandc_version->qpic_major = (qpic_ver & MSM_NAND_VERSION_MAJOR_MASK) >> MSM_NAND_VERSION_MAJOR_SHIFT; nandc_version->qpic_minor = (qpic_ver & MSM_NAND_VERSION_MINOR_MASK) >> MSM_NAND_VERSION_MINOR_SHIFT; pr_info("nand_major:%d, nand_minor:%d, qpic_major:%d, qpic_minor:%d\n", nandc_version->nand_major, nandc_version->nand_minor, nandc_version->qpic_major, nandc_version->qpic_minor); out: return err; } /* * Function to identify whether the attached NAND flash device is * complaint to ONFI spec or not. 
If yes, then it reads the ONFI parameter * page to get the device parameters. */ static int msm_nand_flash_onfi_probe(struct msm_nand_info *info) { struct msm_nand_chip *chip = &info->nand_chip; struct flash_identification *flash = &info->flash_dev; uint32_t crc_chk_count = 0, page_address = 0; int ret = 0, i; /* SPS parameters */ struct msm_nand_sps_cmd *cmd, *curr_cmd; struct sps_iovec *iovec; uint32_t rdata; /* ONFI Identifier/Parameter Page parameters */ uint8_t *onfi_param_info_buf = NULL; dma_addr_t dma_addr_param_info = 0; struct onfi_param_page *onfi_param_page_ptr; struct msm_nand_flash_onfi_data data; uint32_t onfi_signature = 0; /* SPS command/data descriptors */ uint32_t total_cnt = 13; /* * The following 13 commands are required to get onfi parameters - * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg, cmd_vld, dev_cmd1, * read_loc_0, exec, flash_status (read cmd), dev_cmd1, cmd_vld. */ struct { struct sps_transfer xfer; struct sps_iovec cmd_iovec[total_cnt]; struct msm_nand_sps_cmd cmd[total_cnt]; uint32_t flash_status; } *dma_buffer; /* Lookup the version to identify supported features */ struct version nandc_version = {0}; ret = msm_nand_version_check(info, &nandc_version); if (!ret && !(nandc_version.nand_major == 1 && nandc_version.nand_minor == 1 && nandc_version.qpic_major == 1 && nandc_version.qpic_minor == 1)) { ret = -EPERM; goto out; } wait_event(chip->dma_wait_queue, (onfi_param_info_buf = msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH))); dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf); wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer (chip, sizeof(*dma_buffer)))); ret = msm_nand_flash_read_id(info, 1, &onfi_signature); if (ret < 0) { pr_err("Failed to read ONFI signature\n"); goto free_dma; } if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) { pr_info("Found a non ONFI device\n"); ret = -EIO; goto free_dma; } memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data)); ret = 
msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info), &data.devcmd1_orig); if (ret < 0) goto free_dma; ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD_VLD(info), &data.devcmdvld_orig); if (ret < 0) goto free_dma; /* Lookup the 'APPS' partition's first page address */ for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) { if (!strncmp("apps", mtd_part[i].name, strlen(mtd_part[i].name))) { page_address = mtd_part[i].offset << 6; break; } } data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL; data.exec = 1; data.cfg.addr0 = (page_address << 16) | FLASH_READ_ONFI_PARAMETERS_ADDRESS; data.cfg.addr1 = (page_address >> 16) & 0xFF; data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO; data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO; data.devcmd1_mod = (data.devcmd1_orig & 0xFFFFFF00) | FLASH_READ_ONFI_PARAMETERS_COMMAND; data.devcmdvld_mod = data.devcmdvld_orig & 0xFFFFFFFE; data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE; dma_buffer->flash_status = 0xeeeeeeee; curr_cmd = cmd = dma_buffer->cmd; msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd); cmd = curr_cmd; msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE, data.ecc_bch_cfg, 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE, data.devcmdvld_mod, 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE, data.devcmd1_mod, 0); cmd++; rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31); msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec, SPS_IOVEC_FLAG_NWD); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ, msm_virt_to_dma(chip, &dma_buffer->flash_status), 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE, data.devcmd1_orig, 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE, data.devcmdvld_orig, SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT); cmd++; BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd); 
dma_buffer->xfer.iovec = dma_buffer->cmd_iovec; dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip, &dma_buffer->cmd_iovec); iovec = dma_buffer->xfer.iovec; for (i = 0; i < dma_buffer->xfer.iovec_count; i++) { iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce); iovec->size = sizeof(struct sps_command_element); iovec->flags = dma_buffer->cmd[i].flags; iovec++; } mutex_lock(&info->bam_lock); /* Submit data descriptor */ ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info, ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT); if (ret) { pr_err("Failed to submit data descriptors %d\n", ret); mutex_unlock(&info->bam_lock); goto free_dma; } /* Submit command descriptors */ ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer); if (ret) { pr_err("Failed to submit commands %d\n", ret); mutex_unlock(&info->bam_lock); goto free_dma; } wait_for_completion_io(&info->sps.cmd_pipe.completion); wait_for_completion_io(&info->sps.data_prod.completion); mutex_unlock(&info->bam_lock); /* Check for flash status errors */ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) { pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status); ret = -EIO; goto free_dma; } for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH / ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) { onfi_param_page_ptr = (struct onfi_param_page *) (&(onfi_param_info_buf [ONFI_PARAM_PAGE_LENGTH * crc_chk_count])); if (msm_nand_flash_onfi_crc_check( (uint8_t *)onfi_param_page_ptr, ONFI_PARAM_PAGE_LENGTH - 2) == onfi_param_page_ptr->integrity_crc) { break; } } if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH / ONFI_PARAM_PAGE_LENGTH) { pr_err("CRC Check failed on param page\n"); ret = -EIO; goto free_dma; } ret = msm_nand_flash_read_id(info, 0, &flash->flash_id); if (ret < 0) { pr_err("Failed to read flash ID\n"); goto free_dma; } flash->widebus = onfi_param_page_ptr->features_supported & 0x01; flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page; flash->blksize = 
onfi_param_page_ptr->number_of_pages_per_block * flash->pagesize; flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page; flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit * flash->blksize; flash->ecc_correctability = onfi_param_page_ptr-> number_of_bits_ecc_correctability; pr_info("Found an ONFI compliant device %s\n", onfi_param_page_ptr->device_model); /* * Temporary hack for MT29F4G08ABC device. * Since the device is not properly adhering * to ONFi specification it is reporting * as 16 bit device though it is 8 bit device!!! */ if (!strncmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC", 12)) flash->widebus = 0; free_dma: msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); msm_nand_release_dma_buffer(chip, onfi_param_info_buf, ONFI_PARAM_INFO_LENGTH); out: return ret; } /* * Structure that contains read/write parameters required for reading/writing * from/to a page. */ struct msm_nand_rw_params { uint32_t page; uint32_t page_count; uint32_t sectordatasize; uint32_t sectoroobsize; uint32_t cwperpage; uint32_t oob_len_cmd; uint32_t oob_len_data; uint32_t start_sector; uint32_t oob_col; dma_addr_t data_dma_addr; dma_addr_t oob_dma_addr; dma_addr_t data_dma_addr_curr; dma_addr_t oob_dma_addr_curr; bool read; }; /* * Structure that contains NANDc register data required for reading/writing * from/to a page. */ struct msm_nand_rw_reg_data { uint32_t cmd; uint32_t addr0; uint32_t addr1; uint32_t cfg0; uint32_t cfg1; uint32_t ecc_bch_cfg; uint32_t exec; uint32_t ecc_cfg; uint32_t clrfstatus; uint32_t clrrstatus; }; /* * Function that validates page read/write MTD parameters received from upper * layers such as MTD/YAFFS2 and returns error for any unsupported operations * by the driver. In case of success, it also maps the data and oob buffer * received for DMA. 
 */
static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
					loff_t offset,
					struct mtd_oob_ops *ops,
					struct msm_nand_rw_params *args)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	int err = 0;

	pr_debug("========================================================\n");
	pr_debug("offset 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x\n",
			offset, ops->mode, ops->datbuf, ops->len);
	pr_debug("oobbuf 0x%p ooblen 0x%x\n", ops->oobbuf, ops->ooblen);

	if (ops->mode == MTD_OPS_PLACE_OOB) {
		pr_err("MTD_OPS_PLACE_OOB is not supported\n");
		err = -EINVAL;
		goto out;
	}

	/* Page index from byte offset (2K or 4K pages only) */
	if (mtd->writesize == PAGE_SIZE_2K)
		args->page = offset >> 11;

	if (mtd->writesize == PAGE_SIZE_4K)
		args->page = offset >> 12;

	args->oob_len_cmd = ops->ooblen;
	args->oob_len_data = ops->ooblen;
	/* 512 data bytes per codeword */
	args->cwperpage = (mtd->writesize >> 9);
	args->read = (read ? true : false);

	/* Only page-aligned offsets are supported */
	if (offset & (mtd->writesize - 1)) {
		pr_err("unsupported offset 0x%llx\n", offset);
		err = -EINVAL;
		goto out;
	}

	if (!read && !ops->datbuf) {
		pr_err("No data buffer provided for write!!\n");
		err = -EINVAL;
		goto out;
	}

	if (ops->mode == MTD_OPS_RAW) {
		if (!ops->datbuf) {
			pr_err("No data buffer provided for RAW mode\n");
			err = -EINVAL;
			goto out;
		} else if ((ops->len % (mtd->writesize +
				mtd->oobsize)) != 0) {
			pr_err("unsupported data len %d for RAW mode\n",
				ops->len);
			err = -EINVAL;
			goto out;
		}
		args->page_count = ops->len / (mtd->writesize + mtd->oobsize);

	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
			/* when ops->datbuf is NULL, ops->len can be ooblen */
			pr_err("unsupported data len %d for AUTO mode\n",
					ops->len);
			err = -EINVAL;
			goto out;
		}
		if (read && ops->oobbuf && !ops->datbuf) {
			/* OOB-only read: only the last codeword carries OOB */
			args->start_sector = args->cwperpage - 1;
			args->page_count = ops->ooblen / mtd->oobavail;
			if ((args->page_count == 0) && (ops->ooblen))
				args->page_count = 1;
		} else if (ops->datbuf) {
			args->page_count = ops->len / mtd->writesize;
		}
	}

	if (ops->datbuf) {
		args->data_dma_addr_curr = args->data_dma_addr =
			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
			pr_err("dma mapping failed for 0x%p\n", ops->datbuf);
			err = -EIO;
			goto out;
		}
	}
	if (ops->oobbuf) {
		/* Pre-fill with 0xFF so unread OOB bytes look erased */
		if (read)
			memset(ops->oobbuf, 0xFF, ops->ooblen);
		args->oob_dma_addr_curr = args->oob_dma_addr =
			msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
				(read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
			pr_err("dma mapping failed for 0x%p\n", ops->oobbuf);
			err = -EIO;
			goto dma_map_oobbuf_failed;
		}
	}
	goto out;
dma_map_oobbuf_failed:
	if (ops->datbuf)
		dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
out:
	return err;
}

/*
 * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
 * required for page read/write.
 */
static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
					struct mtd_oob_ops *ops,
					struct msm_nand_rw_params *args,
					struct msm_nand_rw_reg_data *data)
{
	if (args->read) {
		if (ops->mode != MTD_OPS_RAW) {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
			/* Program CW_PER_PAGE for the codewords actually read */
			data->cfg0 =
			(chip->cfg0 & ~(7U << CW_PER_PAGE)) |
			(((args->cwperpage-1) - args->start_sector)
			 << CW_PER_PAGE);
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
			data->cfg0 = chip->cfg0_raw;
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
		}

	} else {
		if (ops->mode != MTD_OPS_RAW) {
			data->cfg0 = chip->cfg0;
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cfg0 = chip->cfg0_raw;
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
		}
		data->cmd = MSM_NAND_CMD_PRG_PAGE;
		data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
		data->clrrstatus = MSM_NAND_RESET_READ_STS;
	}
	data->exec = 1;
	data->ecc_cfg = chip->ecc_buf_cfg;
}

/*
 * Function to prepare series of SPS command descriptors required for a page
 *
read/write operation.
 */
static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args,
				struct msm_nand_rw_reg_data *data,
				struct msm_nand_info *info, uint32_t curr_cw,
				struct msm_nand_sps_cmd **curr_cmd)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct msm_nand_sps_cmd *cmd;
	uint32_t rdata;
	/* read_location register parameters */
	uint32_t offset, size, last_read;

	cmd = *curr_cmd;
	/* First codeword of the page takes the BAM pipe lock */
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data->cmd,
			((curr_cw == args->start_sector) ?
			 SPS_IOVEC_FLAG_LOCK : 0));
	cmd++;

	/* Address/config registers are programmed only once per page */
	if (curr_cw == args->start_sector) {
		msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
				data->addr0, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE,
				data->addr1, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
				data->cfg0, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
				data->cfg1, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
				data->ecc_bch_cfg, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_EBI2_ECC_BUF_CFG(info), WRITE,
				data->ecc_cfg, 0);
		cmd++;
	}

	/* Writes need no READ_LOCATION programming */
	if (!args->read)
		goto sub_exec_cmd;

	if (ops->mode == MTD_OPS_RAW) {
		rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
				rdata, 0);
		cmd++;
	}
	if (ops->mode == MTD_OPS_AUTO_OOB && ops->datbuf) {
		/* Last codeword carries fewer data bytes (rest is OOB) */
		offset = 0;
		size = (curr_cw < (args->cwperpage - 1)) ? 516 :
			(512 - ((args->cwperpage - 1) << 2));
		last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
			(ops->oobbuf ? 0 : 1);
		rdata = (offset << 0) | (size << 16) | (last_read << 31);
		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
				rdata, 0);
		cmd++;
	}
	if (ops->mode == MTD_OPS_AUTO_OOB && ops->oobbuf
			&& (curr_cw == (args->cwperpage - 1))) {
		/* OOB region within the last codeword */
		offset = 512 - ((args->cwperpage - 1) << 2);
		size = (args->cwperpage) << 2;
		if (size > args->oob_len_cmd)
			size = args->oob_len_cmd;
		args->oob_len_cmd -= size;
		last_read = 1;
		rdata = (offset << 0) | (size << 16) | (last_read << 31);
		if (ops->datbuf) {
			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_1(info),
					WRITE, rdata, 0);
		} else {
			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info),
					WRITE, rdata, 0);
		}
		cmd++;
	}
sub_exec_cmd:
	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data->exec,
			SPS_IOVEC_FLAG_NWD);
	cmd++;
	*curr_cmd = cmd;
}

/*
 * Function to prepare and submit SPS data descriptors required for a page
 * read/write operation.
 */
static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args,
				struct msm_nand_info *info,
				uint32_t curr_cw)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct sps_pipe *data_pipe_handle;
	uint32_t sectordatasize, sectoroobsize;
	uint32_t sps_flags = 0;
	int err = 0;

	/* Producer pipe for reads, consumer pipe for writes */
	if (args->read)
		data_pipe_handle = info->sps.data_prod.handle;
	else
		data_pipe_handle = info->sps.data_cons.handle;

	if (ops->mode == MTD_OPS_RAW) {
		sectordatasize = chip->cw_size;
		if (!args->read)
			sps_flags = SPS_IOVEC_FLAG_EOT;
		if (curr_cw == (args->cwperpage - 1))
			sps_flags |= SPS_IOVEC_FLAG_INT;

		err = sps_transfer_one(data_pipe_handle,
				args->data_dma_addr_curr,
				sectordatasize, NULL, sps_flags);
		if (err)
			goto out;
		args->data_dma_addr_curr += sectordatasize;

	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf) {
			/* Last codeword carries fewer data bytes */
			sectordatasize = (curr_cw < (args->cwperpage - 1))
					? 516 :
					(512 - ((args->cwperpage - 1) << 2));

			if (!args->read) {
				sps_flags = SPS_IOVEC_FLAG_EOT;
				/* OOB descriptor below will carry EOT */
				if (curr_cw == (args->cwperpage - 1) &&
						ops->oobbuf)
					sps_flags = 0;
			}
			if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
				sps_flags |= SPS_IOVEC_FLAG_INT;

			err = sps_transfer_one(data_pipe_handle,
					args->data_dma_addr_curr,
					sectordatasize, NULL, sps_flags);
			if (err)
				goto out;
			args->data_dma_addr_curr += sectordatasize;
		}

		if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
			sectoroobsize = args->cwperpage << 2;
			if (sectoroobsize > args->oob_len_data)
				sectoroobsize = args->oob_len_data;

			if (!args->read)
				sps_flags |= SPS_IOVEC_FLAG_EOT;
			sps_flags |= SPS_IOVEC_FLAG_INT;
			err = sps_transfer_one(data_pipe_handle,
					args->oob_dma_addr_curr,
					sectoroobsize, NULL, sps_flags);
			if (err)
				goto out;
			args->oob_dma_addr_curr += sectoroobsize;
			args->oob_len_data -= sectoroobsize;
		}
	}
out:
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
 * page with main or/and spare data.
 */
static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t cwperpage = (mtd->writesize >> 9);
	int err, pageerr = 0, rawerr = 0;
	uint32_t n = 0, pages_read = 0;
	uint32_t ecc_errors = 0, total_ecc_errors = 0;
	struct msm_nand_rw_params rw_params;
	struct msm_nand_rw_reg_data data;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct sps_iovec *iovec;
	/*
	 * The following 6 commands will be sent only once for the first
	 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
	 * be sent for every CW - flash, read_location_0, read_location_1,
	 * exec, flash_status and buffer_status.
*/ uint32_t total_cnt = (6 * cwperpage) + 6; struct { struct sps_transfer xfer; struct sps_iovec cmd_iovec[total_cnt]; struct msm_nand_sps_cmd cmd[total_cnt]; struct { uint32_t flash_status; uint32_t buffer_status; } result[cwperpage]; } *dma_buffer; memset(&rw_params, 0, sizeof(struct msm_nand_rw_params)); err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params); if (err) goto validate_mtd_params_failed; wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer( chip, sizeof(*dma_buffer)))); rw_params.oob_col = rw_params.start_sector * chip->cw_size; if (chip->cfg1 & (1 << WIDE_FLASH)) rw_params.oob_col >>= 1; memset(&data, 0, sizeof(struct msm_nand_rw_reg_data)); msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data); while (rw_params.page_count-- > 0) { data.addr0 = (rw_params.page << 16) | rw_params.oob_col; data.addr1 = (rw_params.page >> 16) & 0xff; cmd = dma_buffer->cmd; for (n = rw_params.start_sector; n < cwperpage; n++) { dma_buffer->result[n].flash_status = 0xeeeeeeee; dma_buffer->result[n].buffer_status = 0xeeeeeeee; curr_cmd = cmd; msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info, n, &curr_cmd); cmd = curr_cmd; msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ, msm_virt_to_dma(chip, &dma_buffer->result[n].flash_status), 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_BUFFER_STATUS(info), READ, msm_virt_to_dma(chip, &dma_buffer->result[n].buffer_status), ((n == (cwperpage - 1)) ? 
(SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT) : 0)); cmd++; } BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd); dma_buffer->xfer.iovec = dma_buffer->cmd_iovec; dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip, &dma_buffer->cmd_iovec); iovec = dma_buffer->xfer.iovec; for (n = 0; n < dma_buffer->xfer.iovec_count; n++) { iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[n].ce); iovec->size = sizeof(struct sps_command_element); iovec->flags = dma_buffer->cmd[n].flags; iovec++; } mutex_lock(&info->bam_lock); /* Submit data descriptors */ for (n = rw_params.start_sector; n < cwperpage; n++) { err = msm_nand_submit_rw_data_desc(ops, &rw_params, info, n); if (err) { pr_err("Failed to submit data descs %d\n", err); mutex_unlock(&info->bam_lock); goto free_dma; } } /* Submit command descriptors */ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer); if (err) { pr_err("Failed to submit commands %d\n", err); mutex_unlock(&info->bam_lock); goto free_dma; } wait_for_completion_io(&info->sps.cmd_pipe.completion); wait_for_completion_io(&info->sps.data_prod.completion); mutex_unlock(&info->bam_lock); /* Check for flash status errors */ pageerr = rawerr = 0; for (n = rw_params.start_sector; n < cwperpage; n++) { if (dma_buffer->result[n].flash_status & (FS_OP_ERR | FS_MPU_ERR)) { rawerr = -EIO; break; } } /* Check for ECC correction on empty block */ if (rawerr && ops->datbuf && ops->mode != MTD_OPS_RAW) { uint8_t *datbuf = ops->datbuf + pages_read * mtd->writesize; dma_sync_single_for_cpu(chip->dev, rw_params.data_dma_addr_curr - mtd->writesize, mtd->writesize, DMA_BIDIRECTIONAL); for (n = 0; n < mtd->writesize; n++) { /* TODO: check offset for 4bit BCHECC */ if ((n % 516 == 3 || n % 516 == 175) && datbuf[n] == 0x54) datbuf[n] = 0xff; if (datbuf[n] != 0xff) { pageerr = rawerr; break; } } dma_sync_single_for_device(chip->dev, rw_params.data_dma_addr_curr - mtd->writesize, mtd->writesize, 
DMA_BIDIRECTIONAL); } if (rawerr && ops->oobbuf) { dma_sync_single_for_cpu(chip->dev, rw_params.oob_dma_addr_curr - (ops->ooblen - rw_params.oob_len_data), ops->ooblen - rw_params.oob_len_data, DMA_BIDIRECTIONAL); for (n = 0; n < ops->ooblen; n++) { if (ops->oobbuf[n] != 0xff) { pageerr = rawerr; break; } } dma_sync_single_for_device(chip->dev, rw_params.oob_dma_addr_curr - (ops->ooblen - rw_params.oob_len_data), ops->ooblen - rw_params.oob_len_data, DMA_BIDIRECTIONAL); } /* check for uncorrectable errors */ if (pageerr) { for (n = rw_params.start_sector; n < cwperpage; n++) { if (dma_buffer->result[n].buffer_status & BS_UNCORRECTABLE_BIT) { mtd->ecc_stats.failed++; pageerr = -EBADMSG; break; } } } /* check for correctable errors */ if (!rawerr) { for (n = rw_params.start_sector; n < cwperpage; n++) { ecc_errors = dma_buffer->result[n].buffer_status & BS_CORRECTABLE_ERR_MSK; if (ecc_errors) { total_ecc_errors += ecc_errors; mtd->ecc_stats.corrected += ecc_errors; /* * For Micron devices it is observed * that correctable errors upto 3 bits * are very common. 
*/ if (ecc_errors > 3) pageerr = -EUCLEAN; } } } if (pageerr && (pageerr != -EUCLEAN || err == 0)) err = pageerr; if (rawerr && !pageerr) { pr_debug("%llx %x %x empty page\n", (loff_t)rw_params.page * mtd->writesize, ops->len, ops->ooblen); } else { for (n = rw_params.start_sector; n < cwperpage; n++) pr_debug("cw %d: flash_sts %x buffr_sts %x\n", n, dma_buffer->result[n].flash_status, dma_buffer->result[n].buffer_status); } if (err && err != -EUCLEAN && err != -EBADMSG) goto free_dma; pages_read++; rw_params.page++; } free_dma: msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); if (ops->oobbuf) dma_unmap_page(chip->dev, rw_params.oob_dma_addr, ops->ooblen, DMA_FROM_DEVICE); if (ops->datbuf) dma_unmap_page(chip->dev, rw_params.data_dma_addr, ops->len, DMA_BIDIRECTIONAL); validate_mtd_params_failed: if (ops->mode != MTD_OPS_RAW) ops->retlen = mtd->writesize * pages_read; else ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read; ops->oobretlen = ops->ooblen - rw_params.oob_len_data; if (err) pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n", from, ops->datbuf ? ops->len : 0, ops->ooblen, err, total_ecc_errors); pr_debug("ret %d, retlen %d oobretlen %d\n", err, ops->retlen, ops->oobretlen); pr_debug("========================================================\n"); return err; } /* * Function that gets called from upper layers such as MTD/YAFFS2 to read a * page with only main data. */ static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { int ret; struct mtd_oob_ops ops; ops.mode = MTD_OPS_AUTO_OOB; ops.len = len; ops.retlen = 0; ops.ooblen = 0; ops.datbuf = buf; ops.oobbuf = NULL; ret = msm_nand_read_oob(mtd, from, &ops); *retlen = ops.retlen; return ret; } /* * Function that gets called from upper layers such as MTD/YAFFS2 to write a * page with both main and spare data. 
*/ static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct msm_nand_info *info = mtd->priv; struct msm_nand_chip *chip = &info->nand_chip; uint32_t cwperpage = (mtd->writesize >> 9); uint32_t n, flash_sts, pages_written = 0; int err = 0; struct msm_nand_rw_params rw_params; struct msm_nand_rw_reg_data data; struct msm_nand_sps_cmd *cmd, *curr_cmd; struct sps_iovec *iovec; /* * The following 7 commands will be sent only once : * For first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1, * dev0_ecc_cfg, ebi2_ecc_buf_cfg. * For last codeword (CW) - read_status(write) * * The following 4 commands will be sent for every CW : * flash, exec, flash_status (read), flash_status (write). */ uint32_t total_cnt = (4 * cwperpage) + 7; struct { struct sps_transfer xfer; struct sps_iovec cmd_iovec[total_cnt]; struct msm_nand_sps_cmd cmd[total_cnt]; struct { uint32_t flash_status[cwperpage]; } data; } *dma_buffer; memset(&rw_params, 0, sizeof(struct msm_nand_rw_params)); err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params); if (err) goto validate_mtd_params_failed; wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer)))); memset(&data, 0, sizeof(struct msm_nand_rw_reg_data)); msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data); while (rw_params.page_count-- > 0) { data.addr0 = (rw_params.page << 16); data.addr1 = (rw_params.page >> 16) & 0xff; cmd = dma_buffer->cmd; for (n = 0; n < cwperpage ; n++) { dma_buffer->data.flash_status[n] = 0xeeeeeeee; curr_cmd = cmd; msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info, n, &curr_cmd); cmd = curr_cmd; msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ, msm_virt_to_dma(chip, &dma_buffer->data.flash_status[n]), 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE, data.clrfstatus, 0); cmd++; if (n == (cwperpage - 1)) { msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE, data.clrrstatus, 
SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT); cmd++; } } BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd); dma_buffer->xfer.iovec = dma_buffer->cmd_iovec; dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip, &dma_buffer->cmd_iovec); iovec = dma_buffer->xfer.iovec; for (n = 0; n < dma_buffer->xfer.iovec_count; n++) { iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[n].ce); iovec->size = sizeof(struct sps_command_element); iovec->flags = dma_buffer->cmd[n].flags; iovec++; } mutex_lock(&info->bam_lock); /* Submit data descriptors */ for (n = 0; n < cwperpage; n++) { err = msm_nand_submit_rw_data_desc(ops, &rw_params, info, n); if (err) { pr_err("Failed to submit data descs %d\n", err); mutex_unlock(&info->bam_lock); goto free_dma; } } /* Submit command descriptors */ err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer); if (err) { pr_err("Failed to submit commands %d\n", err); mutex_unlock(&info->bam_lock); goto free_dma; } wait_for_completion_io(&info->sps.cmd_pipe.completion); wait_for_completion_io(&info->sps.data_cons.completion); mutex_unlock(&info->bam_lock); for (n = 0; n < cwperpage; n++) pr_debug("write pg %d: flash_status[%d] = %x\n", rw_params.page, n, dma_buffer->data.flash_status[n]); /* Check for flash status errors */ for (n = 0; n < cwperpage; n++) { flash_sts = dma_buffer->data.flash_status[n]; if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) { pr_err("MPU/OP err (0x%x) set\n", flash_sts); err = -EIO; goto free_dma; } if (n == (cwperpage - 1)) { if (!(flash_sts & FS_DEVICE_WP) || (flash_sts & FS_DEVICE_STS_ERR)) { pr_err("Dev sts err 0x%x\n", flash_sts); err = -EIO; goto free_dma; } } } pages_written++; rw_params.page++; } free_dma: msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); if (ops->oobbuf) dma_unmap_page(chip->dev, rw_params.oob_dma_addr, ops->ooblen, DMA_TO_DEVICE); if (ops->datbuf) dma_unmap_page(chip->dev, rw_params.data_dma_addr, ops->len, 
DMA_TO_DEVICE); validate_mtd_params_failed: if (ops->mode != MTD_OPS_RAW) ops->retlen = mtd->writesize * pages_written; else ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written; ops->oobretlen = ops->ooblen - rw_params.oob_len_data; if (err) pr_err("to %llx datalen %x ooblen %x failed with err %d\n", to, ops->len, ops->ooblen, err); pr_debug("ret %d, retlen %d oobretlen %d\n", err, ops->retlen, ops->oobretlen); pr_debug("================================================\n"); return err; } /* * Function that gets called from upper layers such as MTD/YAFFS2 to write a * page with only main data. */ static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { int ret; struct mtd_oob_ops ops; ops.mode = MTD_OPS_AUTO_OOB; ops.len = len; ops.retlen = 0; ops.ooblen = 0; ops.datbuf = (uint8_t *)buf; ops.oobbuf = NULL; ret = msm_nand_write_oob(mtd, to, &ops); *retlen = ops.retlen; return ret; } /* * Structure that contains NANDc register data for commands required * for Erase operation. */ struct msm_nand_erase_reg_data { struct msm_nand_common_cfgs cfg; uint32_t exec; uint32_t flash_status; uint32_t clrfstatus; uint32_t clrrstatus; }; /* * Function that gets called from upper layers such as MTD/YAFFS2 to erase a * block within NAND device. */ static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr) { int i, err = 0; struct msm_nand_info *info = mtd->priv; struct msm_nand_chip *chip = &info->nand_chip; uint32_t page = 0; struct msm_nand_sps_cmd *cmd, *curr_cmd; struct msm_nand_erase_reg_data data; struct sps_iovec *iovec; uint32_t total_cnt = 9; /* * The following 9 commands are required to erase a page - * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read), * flash_status(write), read_status. 
*/ struct { struct sps_transfer xfer; struct sps_iovec cmd_iovec[total_cnt]; struct msm_nand_sps_cmd cmd[total_cnt]; uint32_t flash_status; } *dma_buffer; if (mtd->writesize == PAGE_SIZE_2K) page = instr->addr >> 11; if (mtd->writesize == PAGE_SIZE_4K) page = instr->addr >> 12; if (instr->addr & (mtd->erasesize - 1)) { pr_err("unsupported erase address, 0x%llx\n", instr->addr); err = -EINVAL; goto out; } if (instr->len != mtd->erasesize) { pr_err("unsupported erase len, %lld\n", instr->len); err = -EINVAL; goto out; } wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer( chip, sizeof(*dma_buffer)))); cmd = dma_buffer->cmd; memset(&data, 0, sizeof(struct msm_nand_erase_reg_data)); data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE; data.cfg.addr0 = page; data.cfg.addr1 = 0; data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE)); data.cfg.cfg1 = chip->cfg1; data.exec = 1; dma_buffer->flash_status = 0xeeeeeeee; data.clrfstatus = MSM_NAND_RESET_FLASH_STS; data.clrrstatus = MSM_NAND_RESET_READ_STS; curr_cmd = cmd; msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd); cmd = curr_cmd; msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec, SPS_IOVEC_FLAG_NWD); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ, msm_virt_to_dma(chip, &dma_buffer->flash_status), 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE, data.clrfstatus, 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE, data.clrrstatus, SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT); cmd++; BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd); dma_buffer->xfer.iovec = dma_buffer->cmd_iovec; dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip, &dma_buffer->cmd_iovec); iovec = dma_buffer->xfer.iovec; for (i = 0; i < dma_buffer->xfer.iovec_count; i++) { iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce); iovec->size = sizeof(struct sps_command_element); iovec->flags = 
dma_buffer->cmd[i].flags; iovec++; } mutex_lock(&info->bam_lock); err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer); if (err) { pr_err("Failed to submit commands %d\n", err); mutex_unlock(&info->bam_lock); goto free_dma; } wait_for_completion_io(&info->sps.cmd_pipe.completion); mutex_unlock(&info->bam_lock); /* Check for flash status errors */ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR | FS_DEVICE_STS_ERR)) { pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status); err = -EIO; } if (!(dma_buffer->flash_status & FS_DEVICE_WP)) { pr_err("Device is write protected\n"); err = -EIO; } if (err) { pr_err("Erase failed, 0x%llx\n", instr->addr); instr->fail_addr = instr->addr; instr->state = MTD_ERASE_FAILED; } else { instr->state = MTD_ERASE_DONE; instr->fail_addr = 0xffffffff; mtd_erase_callback(instr); } free_dma: msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer)); out: return err; } /* * Structure that contains NANDc register data for commands required * for checking if a block is bad. */ struct msm_nand_blk_isbad_data { struct msm_nand_common_cfgs cfg; uint32_t ecc_bch_cfg; uint32_t exec; uint32_t read_offset; }; /* * Function that gets called from upper layers such as MTD/YAFFS2 to check if * a block is bad. This is done by reading the first page within a block and * checking whether the bad block byte location contains 0xFF or not. If it * doesn't contain 0xFF, then it is considered as bad block. */ static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) { struct msm_nand_info *info = mtd->priv; struct msm_nand_chip *chip = &info->nand_chip; int i, ret = 0, bad_block = 0; uint8_t *buf; uint32_t page = 0, rdata, cwperpage; struct msm_nand_sps_cmd *cmd, *curr_cmd; struct msm_nand_blk_isbad_data data; struct sps_iovec *iovec; uint32_t total_cnt = 9; /* * The following 9 commands are required to check bad block - * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0, * exec, flash_status(read). 
*/ struct { struct sps_transfer xfer; struct sps_iovec cmd_iovec[total_cnt]; struct msm_nand_sps_cmd cmd[total_cnt]; uint32_t flash_status; } *dma_buffer; if (mtd->writesize == PAGE_SIZE_2K) page = ofs >> 11; if (mtd->writesize == PAGE_SIZE_4K) page = ofs >> 12; cwperpage = (mtd->writesize >> 9); if (ofs > mtd->size) { pr_err("Invalid offset 0x%llx\n", ofs); bad_block = -EINVAL; goto out; } if (ofs & (mtd->erasesize - 1)) { pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs); bad_block = -EINVAL; goto out; } wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer( chip , sizeof(*dma_buffer) + 4))); buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer); cmd = dma_buffer->cmd; memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data)); data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL; data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE); data.cfg.cfg1 = chip->cfg1_raw; if (chip->cfg1 & (1 << WIDE_FLASH)) data.cfg.addr0 = (page << 16) | ((chip->cw_size * (cwperpage-1)) >> 1); else data.cfg.addr0 = (page << 16) | (chip->cw_size * (cwperpage-1)); data.cfg.addr1 = (page >> 16) & 0xff; data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE; data.exec = 1; data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1))); dma_buffer->flash_status = 0xeeeeeeee; curr_cmd = cmd; msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd); cmd = curr_cmd; msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE, data.ecc_bch_cfg, 0); cmd++; rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31); msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec, SPS_IOVEC_FLAG_NWD); cmd++; msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ, msm_virt_to_dma(chip, &dma_buffer->flash_status), SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK); cmd++; BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd)); dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd); dma_buffer->xfer.iovec = 
dma_buffer->cmd_iovec; dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip, &dma_buffer->cmd_iovec); iovec = dma_buffer->xfer.iovec; for (i = 0; i < dma_buffer->xfer.iovec_count; i++) { iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce); iovec->size = sizeof(struct sps_command_element); iovec->flags = dma_buffer->cmd[i].flags; iovec++; } mutex_lock(&info->bam_lock); /* Submit data descriptor */ ret = sps_transfer_one(info->sps.data_prod.handle, msm_virt_to_dma(chip, buf), 4, NULL, SPS_IOVEC_FLAG_INT); if (ret) { pr_err("Failed to submit data desc %d\n", ret); mutex_unlock(&info->bam_lock); goto free_dma; } /* Submit command descriptor */ ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer); if (ret) { pr_err("Failed to submit commands %d\n", ret); mutex_unlock(&info->bam_lock); goto free_dma; } wait_for_completion_io(&info->sps.cmd_pipe.completion); wait_for_completion_io(&info->sps.data_prod.completion); mutex_unlock(&info->bam_lock); /* Check for flash status errors */ if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) { pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status); bad_block = -EIO; goto free_dma; } /* Check for bad block marker byte */ if (chip->cfg1 & (1 << WIDE_FLASH)) { if (buf[0] != 0xFF || buf[1] != 0xFF) bad_block = 1; } else { if (buf[0] != 0xFF) bad_block = 1; } free_dma: msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4); out: return ret ? ret : bad_block; } /* * Function that gets called from upper layers such as MTD/YAFFS2 to mark a * block as bad. This is done by writing the first page within a block with 0, * thus setting the bad block byte location as well to 0. 
*/ static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_oob_ops ops; int ret; uint8_t *buf; size_t len; if (ofs > mtd->size) { pr_err("Invalid offset 0x%llx\n", ofs); ret = -EINVAL; goto out; } if (ofs & (mtd->erasesize - 1)) { pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs); ret = -EINVAL; goto out; } len = mtd->writesize + mtd->oobsize; buf = kzalloc(len, GFP_KERNEL); if (!buf) { pr_err("unable to allocate memory for 0x%x size\n", len); ret = -ENOMEM; goto out; } ops.mode = MTD_OPS_RAW; ops.len = len; ops.retlen = 0; ops.ooblen = 0; ops.datbuf = buf; ops.oobbuf = NULL; ret = msm_nand_write_oob(mtd, ofs, &ops); kfree(buf); out: return ret; } /* * Function that scans for the attached NAND device. This fills out all * the uninitialized function pointers with the defaults. The flash ID is * read and the mtd/chip structures are filled with the appropriate values. */ int msm_nand_scan(struct mtd_info *mtd) { struct msm_nand_info *info = mtd->priv; struct msm_nand_chip *chip = &info->nand_chip; struct flash_identification *supported_flash = &info->flash_dev; int flash_id = 0, err = 0; uint32_t i, mtd_writesize; uint8_t dev_found = 0, wide_bus; uint32_t manid, devid, devcfg; uint32_t bad_block_byte; struct nand_flash_dev *flashdev = NULL; struct nand_manufacturers *flashman = NULL; /* Probe the Flash device for ONFI compliance */ if (!msm_nand_flash_onfi_probe(info)) { dev_found = 1; } else { err = msm_nand_flash_read_id(info, 0, &flash_id); if (err < 0) { pr_err("Failed to read Flash ID\n"); err = -EINVAL; goto out; } manid = flash_id & 0xFF; devid = (flash_id >> 8) & 0xFF; devcfg = (flash_id >> 24) & 0xFF; for (i = 0; !flashman && nand_manuf_ids[i].id; ++i) if (nand_manuf_ids[i].id == manid) flashman = &nand_manuf_ids[i]; for (i = 0; !flashdev && nand_flash_ids[i].id; ++i) if (nand_flash_ids[i].id == devid) flashdev = &nand_flash_ids[i]; if (!flashdev || !flashman) { pr_err("unknown nand flashid=%x manuf=%x devid=%x\n", flash_id, 
manid, devid); err = -ENOENT; goto out; } dev_found = 1; if (!flashdev->pagesize) { supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0; supported_flash->pagesize = 1024 << (devcfg & 0x3); supported_flash->blksize = (64 * 1024) << ((devcfg >> 4) & 0x3); supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) * (supported_flash->pagesize >> 9); } else { supported_flash->widebus = flashdev->options & NAND_BUSWIDTH_16 ? 1 : 0; supported_flash->pagesize = flashdev->pagesize; supported_flash->blksize = flashdev->erasesize; supported_flash->oobsize = flashdev->pagesize >> 5; } supported_flash->flash_id = flash_id; supported_flash->density = flashdev->chipsize << 20; } if (dev_found) { wide_bus = supported_flash->widebus; mtd->size = supported_flash->density; mtd->writesize = supported_flash->pagesize; mtd->oobsize = supported_flash->oobsize; mtd->erasesize = supported_flash->blksize; mtd_writesize = mtd->writesize; /* Check whether NAND device support 8bit ECC*/ if (supported_flash->ecc_correctability >= 8) chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH; else chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH; pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n", supported_flash->flash_id, (wide_bus) ? 16 : 8, (mtd->size >> 20)); pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n", mtd->writesize, mtd->erasesize, mtd->oobsize); pr_info("BCH ECC: %d Bit\n", (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH ? 8 : 4)); } chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528; chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE) | (516 << UD_SIZE_BYTES) | (0 << DISABLE_STATUS_AFTER_WRITE) | (5 << NUM_ADDR_CYCLES); bad_block_byte = (mtd_writesize - (chip->cw_size * ( (mtd_writesize >> 9) - 1)) + 1); chip->cfg1 = (7 << NAND_RECOVERY_CYCLES) | (0 << CS_ACTIVE_BSY) | (bad_block_byte << BAD_BLOCK_BYTE_NUM) | (0 << BAD_BLOCK_IN_SPARE_AREA) | (2 << WR_RD_BSY_GAP) | ((wide_bus ? 
1 : 0) << WIDE_FLASH) | (1 << ENABLE_BCH_ECC); chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE) | (5 << NUM_ADDR_CYCLES) | (0 << SPARE_SIZE_BYTES) | (chip->cw_size << UD_SIZE_BYTES); chip->cfg1_raw = (7 << NAND_RECOVERY_CYCLES) | (0 << CS_ACTIVE_BSY) | (17 << BAD_BLOCK_BYTE_NUM) | (1 << BAD_BLOCK_IN_SPARE_AREA) | (2 << WR_RD_BSY_GAP) | ((wide_bus ? 1 : 0) << WIDE_FLASH) | (1 << DEV0_CFG1_ECC_DISABLE); chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE) | (0 << ECC_SW_RESET) | (516 << ECC_NUM_DATA_BYTES) | (1 << ECC_FORCE_CLK_OPEN); if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) { chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES : 2 << SPARE_SIZE_BYTES); chip->ecc_bch_cfg |= (1 << ECC_MODE) | ((wide_bus) ? (14 << ECC_PARITY_SIZE_BYTES) : (13 << ECC_PARITY_SIZE_BYTES)); } else { chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES : 4 << SPARE_SIZE_BYTES); chip->ecc_bch_cfg |= (0 << ECC_MODE) | ((wide_bus) ? (8 << ECC_PARITY_SIZE_BYTES) : (7 << ECC_PARITY_SIZE_BYTES)); } /* * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O) * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O). */ chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? (wide_bus ? 14 : 13) : (wide_bus ? 
8 : 7); chip->ecc_buf_cfg = 0x203; /* No of bytes covered by ECC - 516 bytes */ pr_info("CFG0: 0x%08x, CFG1: 0x%08x\n" " RAWCFG0: 0x%08x, RAWCFG1: 0x%08x\n" " ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n" " BAD BLOCK BYTE: 0x%08x\n", chip->cfg0, chip->cfg1, chip->cfg0_raw, chip->cfg1_raw, chip->ecc_buf_cfg, chip->ecc_bch_cfg, bad_block_byte); if (mtd->oobsize == 64) { mtd->oobavail = 16; } else if ((mtd->oobsize == 128) || (mtd->oobsize == 224)) { mtd->oobavail = 32; } else { pr_err("Unsupported NAND oobsize: 0x%x\n", mtd->oobsize); err = -ENODEV; goto out; } /* Fill in remaining MTD driver data */ mtd->type = MTD_NANDFLASH; mtd->flags = MTD_CAP_NANDFLASH; mtd->_erase = msm_nand_erase; mtd->_block_isbad = msm_nand_block_isbad; mtd->_block_markbad = msm_nand_block_markbad; mtd->_read = msm_nand_read; mtd->_write = msm_nand_write; mtd->_read_oob = msm_nand_read_oob; mtd->_write_oob = msm_nand_write_oob; mtd->owner = THIS_MODULE; out: return err; } #define BAM_APPS_PIPE_LOCK_GRP0 0 #define BAM_APPS_PIPE_LOCK_GRP1 1 /* * This function allocates, configures, connects an end point and * also registers event notification for an end point. It also allocates * DMA memory for descriptor FIFO of a pipe. 
*/ static int msm_nand_init_endpoint(struct msm_nand_info *info, struct msm_nand_sps_endpt *end_point, uint32_t pipe_index) { int rc = 0; struct sps_pipe *pipe_handle; struct sps_connect *sps_config = &end_point->config; struct sps_register_event *sps_event = &end_point->event; pipe_handle = sps_alloc_endpoint(); if (!pipe_handle) { pr_err("sps_alloc_endpoint() failed\n"); rc = -ENOMEM; goto out; } rc = sps_get_config(pipe_handle, sps_config); if (rc) { pr_err("sps_get_config() failed %d\n", rc); goto free_endpoint; } if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) { /* READ CASE: source - BAM; destination - system memory */ sps_config->source = info->sps.bam_handle; sps_config->destination = SPS_DEV_HANDLE_MEM; sps_config->mode = SPS_MODE_SRC; sps_config->src_pipe_index = pipe_index; } else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX || pipe_index == SPS_CMD_CONS_PIPE_INDEX) { /* WRITE CASE: source - system memory; destination - BAM */ sps_config->source = SPS_DEV_HANDLE_MEM; sps_config->destination = info->sps.bam_handle; sps_config->mode = SPS_MODE_DEST; sps_config->dest_pipe_index = pipe_index; } sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE; if (pipe_index == SPS_DATA_PROD_PIPE_INDEX || pipe_index == SPS_DATA_CONS_PIPE_INDEX) sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP0; else if (pipe_index == SPS_CMD_CONS_PIPE_INDEX) sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP1; /* * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors * are allowed to be submitted before we get any ack for any of them, * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) * * sizeof(struct sps_iovec). 
*/ sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) * sizeof(struct sps_iovec); sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev, sps_config->desc.size, &sps_config->desc.phys_base, GFP_KERNEL); if (!sps_config->desc.base) { pr_err("dmam_alloc_coherent() failed for size %x\n", sps_config->desc.size); rc = -ENOMEM; goto free_endpoint; } memset(sps_config->desc.base, 0x00, sps_config->desc.size); rc = sps_connect(pipe_handle, sps_config); if (rc) { pr_err("sps_connect() failed %d\n", rc); goto free_endpoint; } init_completion(&end_point->completion); sps_event->mode = SPS_TRIGGER_WAIT; sps_event->options = SPS_O_DESC_DONE; sps_event->xfer_done = &end_point->completion; sps_event->user = (void *)info; rc = sps_register_event(pipe_handle, sps_event); if (rc) { pr_err("sps_register_event() failed %d\n", rc); goto sps_disconnect; } end_point->handle = pipe_handle; pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle, pipe_index); goto out; sps_disconnect: sps_disconnect(pipe_handle); free_endpoint: sps_free_endpoint(pipe_handle); out: return rc; } /* This function disconnects and frees an end point */ static void msm_nand_deinit_endpoint(struct msm_nand_info *info, struct msm_nand_sps_endpt *end_point) { sps_disconnect(end_point->handle); sps_free_endpoint(end_point->handle); } /* * This function registers BAM device and initializes its end points for * the following pipes - * system consumer pipe for data (pipe#0), * system producer pipe for data (pipe#1), * system consumer pipe for commands (pipe#2). */ static int msm_nand_bam_init(struct msm_nand_info *nand_info) { struct sps_bam_props bam = {0}; int rc = 0; bam.phys_addr = nand_info->bam_phys; bam.virt_addr = nand_info->bam_base; bam.irq = nand_info->bam_irq; /* * NAND device is accessible from both Apps and Modem processor and * thus, NANDc and BAM are shared between both the processors. 
But BAM * must be enabled and instantiated only once during boot up by * Trustzone before Modem/Apps is brought out from reset. * * This is indicated to SPS driver on Apps by marking flag * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global * initializations that will be done by Trustzone - Execution * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and * Descriptor summing threshold. * * NANDc BAM device supports 2 execution environments - Modem and Apps * and thus the flag SPS_BAM_MGR_MULTI_EE is set. */ bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE; rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle); if (!rc) goto init_sps_ep; rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle); if (rc) { pr_err("%s: sps_register_bam_device() failed with %d\n", __func__, rc); goto out; } pr_info("%s: BAM device registered: bam_handle 0x%x\n", __func__, nand_info->sps.bam_handle); init_sps_ep: rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod, SPS_DATA_PROD_PIPE_INDEX); if (rc) goto out; rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons, SPS_DATA_CONS_PIPE_INDEX); if (rc) goto deinit_data_prod; rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe, SPS_CMD_CONS_PIPE_INDEX); if (rc) goto deinit_data_cons; goto out; deinit_data_cons: msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons); deinit_data_prod: msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod); out: return rc; } /* * This function disconnects and frees its end points for all the pipes. * Since the BAM is shared resource, it is not deregistered as its handle * might be in use with LCDC. */ static void msm_nand_bam_free(struct msm_nand_info *nand_info) { msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod); msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons); msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe); } /* This function enables DMA support for the NANDc in BAM mode. 
*/ static int msm_nand_enable_dma(struct msm_nand_info *info) { struct msm_nand_sps_cmd *sps_cmd; struct msm_nand_chip *chip = &info->nand_chip; int ret; wait_event(chip->dma_wait_queue, (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd)))); msm_nand_prep_ce(sps_cmd, MSM_NAND_CTRL(info), WRITE, (1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT); ret = sps_transfer_one(info->sps.cmd_pipe.handle, msm_virt_to_dma(chip, &sps_cmd->ce), sizeof(struct sps_command_element), NULL, sps_cmd->flags); if (ret) { pr_err("Failed to submit command: %d\n", ret); goto out; } wait_for_completion_io(&info->sps.cmd_pipe.completion); out: msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd)); return ret; } #ifdef CONFIG_MSM_SMD static int msm_nand_parse_smem_ptable(int *nr_parts) { uint32_t i, j; uint32_t len = FLASH_PTABLE_HDR_LEN; struct flash_partition_entry *pentry; char *delimiter = ":"; pr_info("Parsing partition table info from SMEM\n"); /* Read only the header portion of ptable */ ptable = *(struct flash_partition_table *) (smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len)); /* Verify ptable magic */ if (ptable.magic1 != FLASH_PART_MAGIC1 || ptable.magic2 != FLASH_PART_MAGIC2) { pr_err("Partition table magic verification failed\n"); goto out; } /* Ensure that # of partitions is less than the max we have allocated */ if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) { pr_err("Partition numbers exceed the max limit\n"); goto out; } /* Find out length of partition data based on table version. 
*/ if (ptable.version <= FLASH_PTABLE_V3) { len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 * sizeof(struct flash_partition_entry); } else if (ptable.version == FLASH_PTABLE_V4) { len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 * sizeof(struct flash_partition_entry); } else { pr_err("Unknown ptable version (%d)", ptable.version); goto out; } *nr_parts = ptable.numparts; ptable = *(struct flash_partition_table *) (smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len)); for (i = 0; i < ptable.numparts; i++) { pentry = &ptable.part_entry[i]; if (pentry->name == '\0') continue; /* Convert name to lower case and discard the initial chars */ mtd_part[i].name = pentry->name; for (j = 0; j < strlen(mtd_part[i].name); j++) *(mtd_part[i].name + j) = tolower(*(mtd_part[i].name + j)); strsep(&(mtd_part[i].name), delimiter); mtd_part[i].offset = pentry->offset; mtd_part[i].mask_flags = pentry->attr; mtd_part[i].size = pentry->length; pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n", i, pentry->name, pentry->offset, pentry->length, pentry->attr); } pr_info("SMEM partition table found: ver: %d len: %d\n", ptable.version, ptable.numparts); return 0; out: return -EINVAL; } #else static int msm_nand_parse_smem_ptable(int *nr_parts) { return -ENODEV; } #endif /* * This function gets called when its device named msm-nand is added to * device tree .dts file with all its resources such as physical addresses * for NANDc and BAM, BAM IRQ. * * It also expects the NAND flash partition information to be passed in .dts * file so that it can parse the partitions by calling MTD function * mtd_device_parse_register(). * */ static int __devinit msm_nand_probe(struct platform_device *pdev) { struct msm_nand_info *info; struct resource *res; int i, err, nr_parts; /* * The partition information can also be passed from kernel command * line. Also, the MTD core layer supports adding the whole device as * one MTD device when no partition information is available at all. 
*/ info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info), GFP_KERNEL); if (!info) { pr_err("Unable to allocate memory for msm_nand_info\n"); err = -ENOMEM; goto out; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_phys"); if (!res || !res->start) { pr_err("NAND phys address range is not provided\n"); err = -ENODEV; goto out; } info->nand_phys = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bam_phys"); if (!res || !res->start) { pr_err("BAM phys address range is not provided\n"); err = -ENODEV; goto out; } info->bam_phys = res->start; info->bam_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!info->bam_base) { pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n", res->start, resource_size(res)); err = -ENOMEM; goto out; } info->bam_irq = platform_get_irq_byname(pdev, "bam_irq"); if (info->bam_irq < 0) { pr_err("BAM IRQ is not provided\n"); err = -ENODEV; goto out; } info->mtd.name = dev_name(&pdev->dev); info->mtd.priv = info; info->mtd.owner = THIS_MODULE; info->nand_chip.dev = &pdev->dev; init_waitqueue_head(&info->nand_chip.dma_wait_queue); mutex_init(&info->bam_lock); info->nand_chip.dma_virt_addr = dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE, &info->nand_chip.dma_phys_addr, GFP_KERNEL); if (!info->nand_chip.dma_virt_addr) { pr_err("No memory for DMA buffer size %x\n", MSM_NAND_DMA_BUFFER_SIZE); err = -ENOMEM; goto out; } err = msm_nand_bam_init(info); if (err) { pr_err("msm_nand_bam_init() failed %d\n", err); goto out; } err = msm_nand_enable_dma(info); if (err) { pr_err("Failed to enable DMA in NANDc\n"); goto free_bam; } err = msm_nand_parse_smem_ptable(&nr_parts); if (err < 0) { pr_err("Failed to parse partition table in SMEM\n"); goto free_bam; } if (msm_nand_scan(&info->mtd)) { pr_err("No nand device found\n"); err = -ENXIO; goto free_bam; } for (i = 0; i < nr_parts; i++) { mtd_part[i].offset *= info->mtd.erasesize; mtd_part[i].size *= info->mtd.erasesize; } err = 
mtd_device_parse_register(&info->mtd, NULL, NULL, &mtd_part[0], nr_parts); if (err < 0) { pr_err("Unable to register MTD partitions %d\n", err); goto free_bam; } dev_set_drvdata(&pdev->dev, info); pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n", info->nand_phys, info->bam_phys, info->bam_irq); pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n", info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr); goto out; free_bam: msm_nand_bam_free(info); out: return err; } /* * Remove functionality that gets called when driver/device msm-nand * is removed. */ static int __devexit msm_nand_remove(struct platform_device *pdev) { struct msm_nand_info *info = dev_get_drvdata(&pdev->dev); dev_set_drvdata(&pdev->dev, NULL); if (info) { mtd_device_unregister(&info->mtd); msm_nand_bam_free(info); } return 0; } #define DRIVER_NAME "msm_qpic_nand" static const struct of_device_id msm_nand_match_table[] = { { .compatible = "qcom,msm-nand", }, {}, }; static struct platform_driver msm_nand_driver = { .probe = msm_nand_probe, .remove = __devexit_p(msm_nand_remove), .driver = { .name = DRIVER_NAME, .of_match_table = msm_nand_match_table, }, }; module_platform_driver(msm_nand_driver); MODULE_ALIAS(DRIVER_NAME); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MSM QPIC NAND flash driver");
gpl-2.0
golden-guy/android_kernel_samsung_golden
fs/xfs/linux-2.6/xfs_super.c
1734
46635
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_dir2.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_fsops.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" #include "xfs_log_priv.h" #include "xfs_trans_priv.h" #include "xfs_filestream.h" #include "xfs_da_btree.h" #include "xfs_extfree_item.h" #include "xfs_mru_cache.h" #include "xfs_inode_item.h" #include "xfs_sync.h" #include "xfs_trace.h" #include <linux/namei.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mount.h> #include <linux/mempool.h> #include <linux/writeback.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/parser.h> static const struct super_operations xfs_super_operations; static kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; #define MNTOPT_LOGBUFS "logbufs" /* number 
of XFS log buffers */ #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ #define MNTOPT_LOGDEV "logdev" /* log device */ #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ #define MNTOPT_MTPT "mtpt" /* filesystem mount point */ #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */ #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */ #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */ #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and * unwritten extent conversion */ #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes * in stat(). 
*/ #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ #define MNTOPT_QUOTA "quota" /* disk quotas (user) */ #define MNTOPT_NOQUOTA "noquota" /* no quotas */ #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ #define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ #define MNTOPT_DISCARD "discard" /* Discard unused blocks */ #define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ /* * Table driven mount option parser. * * Currently only used for remount, but it will be used for mount * in the future, too. 
*/ enum { Opt_barrier, Opt_nobarrier, Opt_err }; static const match_table_t tokens = { {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_err, NULL} }; STATIC unsigned long suffix_strtoul(char *s, char **endp, unsigned int base) { int last, shift_left_factor = 0; char *value = s; last = strlen(value) - 1; if (value[last] == 'K' || value[last] == 'k') { shift_left_factor = 10; value[last] = '\0'; } if (value[last] == 'M' || value[last] == 'm') { shift_left_factor = 20; value[last] = '\0'; } if (value[last] == 'G' || value[last] == 'g') { shift_left_factor = 30; value[last] = '\0'; } return simple_strtoul((const char *)s, endp, base) << shift_left_factor; } /* * This function fills in xfs_mount_t fields based on mount args. * Note: the superblock has _not_ yet been read in. * * Note that this function leaks the various device name allocations on * failure. The caller takes care of them. */ STATIC int xfs_parseargs( struct xfs_mount *mp, char *options) { struct super_block *sb = mp->m_super; char *this_char, *value, *eov; int dsunit = 0; int dswidth = 0; int iosize = 0; __uint8_t iosizelog = 0; /* * set up the mount name first so all the errors will refer to the * correct device. */ mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); if (!mp->m_fsname) return ENOMEM; mp->m_fsname_len = strlen(mp->m_fsname) + 1; /* * Copy binary VFS mount flags we are interested in. */ if (sb->s_flags & MS_RDONLY) mp->m_flags |= XFS_MOUNT_RDONLY; if (sb->s_flags & MS_DIRSYNC) mp->m_flags |= XFS_MOUNT_DIRSYNC; if (sb->s_flags & MS_SYNCHRONOUS) mp->m_flags |= XFS_MOUNT_WSYNC; /* * Set some default flags that could be cleared by the mount option * parsing. */ mp->m_flags |= XFS_MOUNT_BARRIER; mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; mp->m_flags |= XFS_MOUNT_SMALL_INUMS; mp->m_flags |= XFS_MOUNT_DELAYLOG; /* * These can be overridden by the mount option parsing. 
*/ mp->m_logbufs = -1; mp->m_logbsize = -1; if (!options) goto done; while ((this_char = strsep(&options, ",")) != NULL) { if (!*this_char) continue; if ((value = strchr(this_char, '=')) != NULL) *value++ = 0; if (!strcmp(this_char, MNTOPT_LOGBUFS)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } mp->m_logbufs = simple_strtoul(value, &eov, 10); } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } mp->m_logbsize = suffix_strtoul(value, &eov, 10); } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); if (!mp->m_logname) return ENOMEM; } else if (!strcmp(this_char, MNTOPT_MTPT)) { xfs_warn(mp, "%s option not allowed on this system", this_char); return EINVAL; } else if (!strcmp(this_char, MNTOPT_RTDEV)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); if (!mp->m_rtname) return ENOMEM; } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } iosize = simple_strtoul(value, &eov, 10); iosizelog = ffs(iosize) - 1; } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } iosize = suffix_strtoul(value, &eov, 10); iosizelog = ffs(iosize) - 1; } else if (!strcmp(this_char, MNTOPT_GRPID) || !strcmp(this_char, MNTOPT_BSDGROUPS)) { mp->m_flags |= XFS_MOUNT_GRPID; } else if (!strcmp(this_char, MNTOPT_NOGRPID) || !strcmp(this_char, MNTOPT_SYSVGROUPS)) { mp->m_flags &= ~XFS_MOUNT_GRPID; } else if (!strcmp(this_char, MNTOPT_WSYNC)) { mp->m_flags |= XFS_MOUNT_WSYNC; } else if 
(!strcmp(this_char, MNTOPT_NORECOVERY)) { mp->m_flags |= XFS_MOUNT_NORECOVERY; } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { mp->m_flags |= XFS_MOUNT_NOALIGN; } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { mp->m_flags |= XFS_MOUNT_SWALLOC; } else if (!strcmp(this_char, MNTOPT_SUNIT)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } dsunit = simple_strtoul(value, &eov, 10); } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { if (!value || !*value) { xfs_warn(mp, "%s option requires an argument", this_char); return EINVAL; } dswidth = simple_strtoul(value, &eov, 10); } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; #if !XFS_BIG_INUMS xfs_warn(mp, "%s option not allowed on this system", this_char); return EINVAL; #endif } else if (!strcmp(this_char, MNTOPT_NOUUID)) { mp->m_flags |= XFS_MOUNT_NOUUID; } else if (!strcmp(this_char, MNTOPT_BARRIER)) { mp->m_flags |= XFS_MOUNT_BARRIER; } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { mp->m_flags &= ~XFS_MOUNT_BARRIER; } else if (!strcmp(this_char, MNTOPT_IKEEP)) { mp->m_flags |= XFS_MOUNT_IKEEP; } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { mp->m_flags &= ~XFS_MOUNT_IKEEP; } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; } else if (!strcmp(this_char, MNTOPT_ATTR2)) { mp->m_flags |= XFS_MOUNT_ATTR2; } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { mp->m_flags &= ~XFS_MOUNT_ATTR2; mp->m_flags |= XFS_MOUNT_NOATTR2; } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { mp->m_flags |= XFS_MOUNT_FILESTREAMS; } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_QUOTA) || !strcmp(this_char, MNTOPT_UQUOTA) 
|| !strcmp(this_char, MNTOPT_USRQUOTA)) { mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | XFS_UQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || !strcmp(this_char, MNTOPT_UQUOTANOENF)) { mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); mp->m_qflags &= ~XFS_UQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_PQUOTA) || !strcmp(this_char, MNTOPT_PRJQUOTA)) { mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); mp->m_qflags &= ~XFS_OQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_GQUOTA) || !strcmp(this_char, MNTOPT_GRPQUOTA)) { mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); mp->m_qflags &= ~XFS_OQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { mp->m_flags |= XFS_MOUNT_DELAYLOG; } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { mp->m_flags &= ~XFS_MOUNT_DELAYLOG; } else if (!strcmp(this_char, MNTOPT_DISCARD)) { mp->m_flags |= XFS_MOUNT_DISCARD; } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { mp->m_flags &= ~XFS_MOUNT_DISCARD; } else if (!strcmp(this_char, "ihashsize")) { xfs_warn(mp, "ihashsize no longer used, option is deprecated."); } else if (!strcmp(this_char, "osyncisdsync")) { xfs_warn(mp, "osyncisdsync has no effect, option is deprecated."); } else if (!strcmp(this_char, "osyncisosync")) { xfs_warn(mp, "osyncisosync has no effect, option is deprecated."); } else if (!strcmp(this_char, "irixsgid")) { xfs_warn(mp, "irixsgid is now a sysctl(2) variable, option is deprecated."); } else { xfs_warn(mp, "unknown mount option [%s].", this_char); return EINVAL; } } /* * no recovery flag requires a read-only mount */ if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && !(mp->m_flags & XFS_MOUNT_RDONLY)) { xfs_warn(mp, "no-recovery mounts must be read-only."); return EINVAL; } if 
((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { xfs_warn(mp, "sunit and swidth options incompatible with the noalign option"); return EINVAL; } if ((mp->m_flags & XFS_MOUNT_DISCARD) && !(mp->m_flags & XFS_MOUNT_DELAYLOG)) { xfs_warn(mp, "the discard option is incompatible with the nodelaylog option"); return EINVAL; } #ifndef CONFIG_XFS_QUOTA if (XFS_IS_QUOTA_RUNNING(mp)) { xfs_warn(mp, "quota support not available in this kernel."); return EINVAL; } #endif if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { xfs_warn(mp, "cannot mount with both project and group quota"); return EINVAL; } if ((dsunit && !dswidth) || (!dsunit && dswidth)) { xfs_warn(mp, "sunit and swidth must be specified together"); return EINVAL; } if (dsunit && (dswidth % dsunit != 0)) { xfs_warn(mp, "stripe width (%d) must be a multiple of the stripe unit (%d)", dswidth, dsunit); return EINVAL; } done: if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { /* * At this point the superblock has not been read * in, therefore we do not know the block size. * Before the mount call ends we will convert * these to FSBs. 
*/ if (dsunit) { mp->m_dalign = dsunit; mp->m_flags |= XFS_MOUNT_RETERR; } if (dswidth) mp->m_swidth = dswidth; } if (mp->m_logbufs != -1 && mp->m_logbufs != 0 && (mp->m_logbufs < XLOG_MIN_ICLOGS || mp->m_logbufs > XLOG_MAX_ICLOGS)) { xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); return XFS_ERROR(EINVAL); } if (mp->m_logbsize != -1 && mp->m_logbsize != 0 && (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || !is_power_of_2(mp->m_logbsize))) { xfs_warn(mp, "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", mp->m_logbsize); return XFS_ERROR(EINVAL); } if (iosizelog) { if (iosizelog > XFS_MAX_IO_LOG || iosizelog < XFS_MIN_IO_LOG) { xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", iosizelog, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG); return XFS_ERROR(EINVAL); } mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; mp->m_readio_log = iosizelog; mp->m_writeio_log = iosizelog; } return 0; } struct proc_xfs_info { int flag; char *str; }; STATIC int xfs_showargs( struct xfs_mount *mp, struct seq_file *m) { static struct proc_xfs_info xfs_info_set[] = { /* the few simple ones we can get from the mount struct */ { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP }, { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, { XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD }, { 0, NULL } }; static struct proc_xfs_info xfs_info_unset[] = { /* the few simple ones we can get from the mount struct */ { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, { 0, NULL } }; struct proc_xfs_info *xfs_infop; for (xfs_infop = 
xfs_info_set; xfs_infop->flag; xfs_infop++) { if (mp->m_flags & xfs_infop->flag) seq_puts(m, xfs_infop->str); } for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) { if (!(mp->m_flags & xfs_infop->flag)) seq_puts(m, xfs_infop->str); } if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", (int)(1 << mp->m_writeio_log) >> 10); if (mp->m_logbufs > 0) seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); if (mp->m_logbsize > 0) seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); if (mp->m_logname) seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); if (mp->m_rtname) seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); if (mp->m_dalign > 0) seq_printf(m, "," MNTOPT_SUNIT "=%d", (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); if (mp->m_swidth > 0) seq_printf(m, "," MNTOPT_SWIDTH "=%d", (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) seq_puts(m, "," MNTOPT_USRQUOTA); else if (mp->m_qflags & XFS_UQUOTA_ACCT) seq_puts(m, "," MNTOPT_UQUOTANOENF); /* Either project or group quotas can be active, not both */ if (mp->m_qflags & XFS_PQUOTA_ACCT) { if (mp->m_qflags & XFS_OQUOTA_ENFD) seq_puts(m, "," MNTOPT_PRJQUOTA); else seq_puts(m, "," MNTOPT_PQUOTANOENF); } else if (mp->m_qflags & XFS_GQUOTA_ACCT) { if (mp->m_qflags & XFS_OQUOTA_ENFD) seq_puts(m, "," MNTOPT_GRPQUOTA); else seq_puts(m, "," MNTOPT_GQUOTANOENF); } if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) seq_puts(m, "," MNTOPT_NOQUOTA); return 0; } __uint64_t xfs_max_file_offset( unsigned int blockshift) { unsigned int pagefactor = 1; unsigned int bitshift = BITS_PER_LONG - 1; /* Figure out maximum filesize, on Linux this can depend on * the filesystem blocksize (on 32 bit platforms). * __block_write_begin does this in an [unsigned] long... 
* page->index << (PAGE_CACHE_SHIFT - bbits) * So, for page sized blocks (4K on 32 bit platforms), * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) * but for smaller blocksizes it is less (bbits = log2 bsize). * Note1: get_block_t takes a long (implicit cast from above) * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch * can optionally convert the [unsigned] long from above into * an [unsigned] long long. */ #if BITS_PER_LONG == 32 # if defined(CONFIG_LBDAF) ASSERT(sizeof(sector_t) == 8); pagefactor = PAGE_CACHE_SIZE; bitshift = BITS_PER_LONG; # else pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift); # endif #endif return (((__uint64_t)pagefactor) << bitshift) - 1; } STATIC int xfs_blkdev_get( xfs_mount_t *mp, const char *name, struct block_device **bdevp) { int error = 0; *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, mp); if (IS_ERR(*bdevp)) { error = PTR_ERR(*bdevp); xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); } return -error; } STATIC void xfs_blkdev_put( struct block_device *bdev) { if (bdev) blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } void xfs_blkdev_issue_flush( xfs_buftarg_t *buftarg) { blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL); } STATIC void xfs_close_devices( struct xfs_mount *mp) { if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { struct block_device *logdev = mp->m_logdev_targp->bt_bdev; xfs_free_buftarg(mp, mp->m_logdev_targp); xfs_blkdev_put(logdev); } if (mp->m_rtdev_targp) { struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev; xfs_free_buftarg(mp, mp->m_rtdev_targp); xfs_blkdev_put(rtdev); } xfs_free_buftarg(mp, mp->m_ddev_targp); } /* * The file system configurations are: * (1) device (partition) with data and internal log * (2) logical volume with data and log subvolumes. * (3) logical volume with data, log, and realtime subvolumes. 
* * We only have to handle opening the log and realtime volumes here if * they are present. The data subvolume has already been opened by * get_sb_bdev() and is stored in sb->s_bdev. */ STATIC int xfs_open_devices( struct xfs_mount *mp) { struct block_device *ddev = mp->m_super->s_bdev; struct block_device *logdev = NULL, *rtdev = NULL; int error; /* * Open real time and log devices - order is important. */ if (mp->m_logname) { error = xfs_blkdev_get(mp, mp->m_logname, &logdev); if (error) goto out; } if (mp->m_rtname) { error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev); if (error) goto out_close_logdev; if (rtdev == ddev || rtdev == logdev) { xfs_warn(mp, "Cannot mount filesystem with identical rtdev and ddev/logdev."); error = EINVAL; goto out_close_rtdev; } } /* * Setup xfs_mount buffer target pointers */ error = ENOMEM; mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname); if (!mp->m_ddev_targp) goto out_close_rtdev; if (rtdev) { mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1, mp->m_fsname); if (!mp->m_rtdev_targp) goto out_free_ddev_targ; } if (logdev && logdev != ddev) { mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1, mp->m_fsname); if (!mp->m_logdev_targp) goto out_free_rtdev_targ; } else { mp->m_logdev_targp = mp->m_ddev_targp; } return 0; out_free_rtdev_targ: if (mp->m_rtdev_targp) xfs_free_buftarg(mp, mp->m_rtdev_targp); out_free_ddev_targ: xfs_free_buftarg(mp, mp->m_ddev_targp); out_close_rtdev: if (rtdev) xfs_blkdev_put(rtdev); out_close_logdev: if (logdev && logdev != ddev) xfs_blkdev_put(logdev); out: return error; } /* * Setup xfs_mount buffer target pointers based on superblock */ STATIC int xfs_setup_devices( struct xfs_mount *mp) { int error; error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, mp->m_sb.sb_sectsize); if (error) return error; if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { unsigned int log_sector_size = BBSIZE; if (xfs_sb_version_hassector(&mp->m_sb)) log_sector_size = 
mp->m_sb.sb_logsectsize; error = xfs_setsize_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize, log_sector_size); if (error) return error; } if (mp->m_rtdev_targp) { error = xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize, mp->m_sb.sb_sectsize); if (error) return error; } return 0; } /* Catch misguided souls that try to use this interface on XFS */ STATIC struct inode * xfs_fs_alloc_inode( struct super_block *sb) { BUG(); return NULL; } /* * Now that the generic code is guaranteed not to be accessing * the linux inode, we can reclaim the inode. */ STATIC void xfs_fs_destroy_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); trace_xfs_destroy_inode(ip); XFS_STATS_INC(vn_reclaim); /* bad inode, get out here ASAP */ if (is_bad_inode(inode)) goto out_reclaim; xfs_ioend_wait(ip); ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); /* * We should never get here with one of the reclaim flags already set. */ ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM)); /* * We always use background reclaim here because even if the * inode is clean, it still may be under IO and hence we have * to take the flush lock. The background reclaim path handles * this more efficiently than we can here, so simply let background * reclaim tear down all inodes. */ out_reclaim: xfs_inode_set_reclaim_tag(ip); } /* * Slab object creation initialisation for the XFS inode. * This covers only the idempotent fields in the XFS inode; * all other fields need to be initialised on allocation * from the slab. This avoids the need to repeatedly initialise * fields in the xfs inode that left in the initialise state * when freeing the inode. 
*/ STATIC void xfs_fs_inode_init_once( void *inode) { struct xfs_inode *ip = inode; memset(ip, 0, sizeof(struct xfs_inode)); /* vfs inode */ inode_init_once(VFS_I(ip)); /* xfs inode */ atomic_set(&ip->i_iocount, 0); atomic_set(&ip->i_pincount, 0); spin_lock_init(&ip->i_flags_lock); init_waitqueue_head(&ip->i_ipin_wait); /* * Because we want to use a counting completion, complete * the flush completion once to allow a single access to * the flush completion without blocking. */ init_completion(&ip->i_flush); complete(&ip->i_flush); mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, "xfsino", ip->i_ino); } /* * Dirty the XFS inode when mark_inode_dirty_sync() is called so that * we catch unlogged VFS level updates to the inode. * * We need the barrier() to maintain correct ordering between unlogged * updates and the transaction commit code that clears the i_update_core * field. This requires all updates to be completed before marking the * inode dirty. */ STATIC void xfs_fs_dirty_inode( struct inode *inode, int flags) { barrier(); XFS_I(inode)->i_update_core = 1; } STATIC int xfs_fs_write_inode( struct inode *inode, struct writeback_control *wbc) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; int error = EAGAIN; trace_xfs_write_inode(ip); if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) { /* * Make sure the inode has made it it into the log. Instead * of forcing it all the way to stable storage using a * synchronous transaction we let the log force inside the * ->sync_fs call do that for thus, which reduces the number * of synchronous log foces dramatically. */ xfs_ioend_wait(ip); error = xfs_log_dirty_inode(ip, NULL, 0); if (error) goto out; return 0; } else { if (!ip->i_update_core) return 0; /* * We make this non-blocking if the inode is contended, return * EAGAIN to indicate to the caller that they did not succeed. 
* This prevents the flush path from blocking on inodes inside * another operation right now, they get caught later by * xfs_sync. */ if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) goto out; if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) goto out_unlock; /* * Now we have the flush lock and the inode is not pinned, we * can check if the inode is really clean as we know that * there are no pending transaction completions, it is not * waiting on the delayed write queue and there is no IO in * progress. */ if (xfs_inode_clean(ip)) { xfs_ifunlock(ip); error = 0; goto out_unlock; } error = xfs_iflush(ip, SYNC_TRYLOCK); } out_unlock: xfs_iunlock(ip, XFS_ILOCK_SHARED); out: /* * if we failed to write out the inode then mark * it dirty again so we'll try again later. */ if (error) xfs_mark_inode_dirty_sync(ip); return -error; } STATIC void xfs_fs_evict_inode( struct inode *inode) { xfs_inode_t *ip = XFS_I(inode); trace_xfs_evict_inode(ip); truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); XFS_STATS_INC(vn_rele); XFS_STATS_INC(vn_remove); XFS_STATS_DEC(vn_active); /* * The iolock is used by the file system to coordinate reads, * writes, and block truncates. Up to this point the lock * protected concurrent accesses by users of the inode. But * from here forward we're doing some final processing of the * inode because we're done with it, and although we reuse the * iolock for protection it is really a distinct lock class * (in the lockdep sense) from before. To keep lockdep happy * (and basically indicate what we are doing), we explicitly * re-init the iolock here. 
*/ ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); lockdep_set_class_and_name(&ip->i_iolock.mr_lock, &xfs_iolock_reclaimable, "xfs_iolock_reclaimable"); xfs_inactive(ip); } STATIC void xfs_free_fsname( struct xfs_mount *mp) { kfree(mp->m_fsname); kfree(mp->m_rtname); kfree(mp->m_logname); } STATIC void xfs_fs_put_super( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); /* * Unregister the memory shrinker before we tear down the mount * structure so we don't have memory reclaim racing with us here. */ xfs_inode_shrinker_unregister(mp); xfs_syncd_stop(mp); /* * Blow away any referenced inode in the filestreams cache. * This can and will cause log traffic as inodes go inactive * here. */ xfs_filestream_unmount(mp); XFS_bflush(mp->m_ddev_targp); xfs_unmountfs(mp); xfs_freesb(mp); xfs_icsb_destroy_counters(mp); xfs_close_devices(mp); xfs_free_fsname(mp); kfree(mp); } STATIC int xfs_fs_sync_fs( struct super_block *sb, int wait) { struct xfs_mount *mp = XFS_M(sb); int error; /* * Not much we can do for the first async pass. Writing out the * superblock would be counter-productive as we are going to redirty * when writing out other data and metadata (and writing out a single * block is quite fast anyway). * * Try to asynchronously kick off quota syncing at least. */ if (!wait) { xfs_qm_sync(mp, SYNC_TRYLOCK); return 0; } error = xfs_quiesce_data(mp); if (error) return -error; if (laptop_mode) { /* * The disk must be active because we're syncing. * We schedule xfssyncd now (now that the disk is * active) instead of later (when it might not be). 
*/ flush_delayed_work_sync(&mp->m_sync_work); } return 0; } STATIC int xfs_fs_statfs( struct dentry *dentry, struct kstatfs *statp) { struct xfs_mount *mp = XFS_M(dentry->d_sb); xfs_sb_t *sbp = &mp->m_sb; struct xfs_inode *ip = XFS_I(dentry->d_inode); __uint64_t fakeinos, id; xfs_extlen_t lsize; __int64_t ffree; statp->f_type = XFS_SB_MAGIC; statp->f_namelen = MAXNAMELEN - 1; id = huge_encode_dev(mp->m_ddev_targp->bt_dev); statp->f_fsid.val[0] = (u32)id; statp->f_fsid.val[1] = (u32)(id >> 32); xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); spin_lock(&mp->m_sb_lock); statp->f_bsize = sbp->sb_blocksize; lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; statp->f_blocks = sbp->sb_dblocks - lsize; statp->f_bfree = statp->f_bavail = sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); fakeinos = statp->f_bfree << sbp->sb_inopblog; statp->f_files = MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); if (mp->m_maxicount) statp->f_files = min_t(typeof(statp->f_files), statp->f_files, mp->m_maxicount); /* make sure statp->f_ffree does not underflow */ ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); statp->f_ffree = max_t(__int64_t, ffree, 0); spin_unlock(&mp->m_sb_lock); if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) xfs_qm_statvfs(ip, statp); return 0; } STATIC void xfs_save_resvblks(struct xfs_mount *mp) { __uint64_t resblks = 0; mp->m_resblks_save = mp->m_resblks; xfs_reserve_blocks(mp, &resblks, NULL); } STATIC void xfs_restore_resvblks(struct xfs_mount *mp) { __uint64_t resblks; if (mp->m_resblks_save) { resblks = mp->m_resblks_save; mp->m_resblks_save = 0; } else resblks = xfs_default_resblks(mp); xfs_reserve_blocks(mp, &resblks, NULL); } STATIC int xfs_fs_remount( struct super_block *sb, int *flags, char *options) { struct xfs_mount *mp = XFS_M(sb); substring_t args[MAX_OPT_ARGS]; char *p; int error; while ((p = strsep(&options, ",")) != NULL) { int token; if 
(!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_barrier: mp->m_flags |= XFS_MOUNT_BARRIER; break; case Opt_nobarrier: mp->m_flags &= ~XFS_MOUNT_BARRIER; break; default: /* * Logically we would return an error here to prevent * users from believing they might have changed * mount options using remount which can't be changed. * * But unfortunately mount(8) adds all options from * mtab and fstab to the mount arguments in some cases * so we can't blindly reject options, but have to * check for each specified option if it actually * differs from the currently set option and only * reject it if that's the case. * * Until that is implemented we return success for * every remount request, and silently ignore all * options that we can't actually change. */ #if 0 xfs_info(mp, "mount option \"%s\" not supported for remount\n", p); return -EINVAL; #else break; #endif } } /* ro -> rw */ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { mp->m_flags &= ~XFS_MOUNT_RDONLY; /* * If this is the first remount to writeable state we * might have some superblock changes to update. */ if (mp->m_update_flags) { error = xfs_mount_log_sb(mp, mp->m_update_flags); if (error) { xfs_warn(mp, "failed to write sb changes"); return error; } mp->m_update_flags = 0; } /* * Fill out the reserve pool if it is empty. Use the stashed * value if it is non-zero, otherwise go with the default. */ xfs_restore_resvblks(mp); } /* rw -> ro */ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { /* * After we have synced the data but before we sync the * metadata, we need to free up the reserve block pool so that * the used block count in the superblock on disk is correct at * the end of the remount. Stash the current reserve pool size * so that if we get remounted rw, we can return it to the same * size. */ xfs_quiesce_data(mp); xfs_save_resvblks(mp); xfs_quiesce_attr(mp); mp->m_flags |= XFS_MOUNT_RDONLY; } return 0; } /* * Second stage of a freeze. 
The data is already frozen so we only * need to take care of the metadata. Once that's done write a dummy * record to dirty the log in case of a crash while frozen. */ STATIC int xfs_fs_freeze( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); xfs_save_resvblks(mp); xfs_quiesce_attr(mp); return -xfs_fs_log_dummy(mp); } STATIC int xfs_fs_unfreeze( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); xfs_restore_resvblks(mp); return 0; } STATIC int xfs_fs_show_options( struct seq_file *m, struct vfsmount *mnt) { return -xfs_showargs(XFS_M(mnt->mnt_sb), m); } /* * This function fills in xfs_mount_t fields based on mount args. * Note: the superblock _has_ now been read in. */ STATIC int xfs_finish_flags( struct xfs_mount *mp) { int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); /* Fail a mount where the logbuf is smaller than the log stripe */ if (xfs_sb_version_haslogv2(&mp->m_sb)) { if (mp->m_logbsize <= 0 && mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { mp->m_logbsize = mp->m_sb.sb_logsunit; } else if (mp->m_logbsize > 0 && mp->m_logbsize < mp->m_sb.sb_logsunit) { xfs_warn(mp, "logbuf size must be greater than or equal to log stripe size"); return XFS_ERROR(EINVAL); } } else { /* Fail a mount if the logbuf is larger than 32K */ if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { xfs_warn(mp, "logbuf size for version 1 logs must be 16K or 32K"); return XFS_ERROR(EINVAL); } } /* * mkfs'ed attr2 will turn on attr2 mount unless explicitly * told by noattr2 to turn it off */ if (xfs_sb_version_hasattr2(&mp->m_sb) && !(mp->m_flags & XFS_MOUNT_NOATTR2)) mp->m_flags |= XFS_MOUNT_ATTR2; /* * prohibit r/w mounts of read-only filesystems */ if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { xfs_warn(mp, "cannot mount a read-only filesystem as read-write"); return XFS_ERROR(EROFS); } return 0; } STATIC int xfs_fs_fill_super( struct super_block *sb, void *data, int silent) { struct inode *root; struct xfs_mount *mp = NULL; int flags = 0, error = ENOMEM; mp = 
kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); if (!mp) goto out; spin_lock_init(&mp->m_sb_lock); mutex_init(&mp->m_growlock); atomic_set(&mp->m_active_trans, 0); mp->m_super = sb; sb->s_fs_info = mp; error = xfs_parseargs(mp, (char *)data); if (error) goto out_free_fsname; sb_min_blocksize(sb, BBSIZE); sb->s_xattr = xfs_xattr_handlers; sb->s_export_op = &xfs_export_operations; #ifdef CONFIG_XFS_QUOTA sb->s_qcop = &xfs_quotactl_operations; #endif sb->s_op = &xfs_super_operations; if (silent) flags |= XFS_MFSI_QUIET; error = xfs_open_devices(mp); if (error) goto out_free_fsname; error = xfs_icsb_init_counters(mp); if (error) goto out_close_devices; error = xfs_readsb(mp, flags); if (error) goto out_destroy_counters; error = xfs_finish_flags(mp); if (error) goto out_free_sb; error = xfs_setup_devices(mp); if (error) goto out_free_sb; error = xfs_filestream_mount(mp); if (error) goto out_free_sb; /* * we must configure the block size in the superblock before we run the * full mount process as the mount process can lookup and cache inodes. * For the same reason we must also initialise the syncd and register * the inode cache shrinker so that inodes can be reclaimed during * operations like a quotacheck that iterate all inodes in the * filesystem. 
*/ sb->s_magic = XFS_SB_MAGIC; sb->s_blocksize = mp->m_sb.sb_blocksize; sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); sb->s_time_gran = 1; set_posix_acl_flag(sb); xfs_inode_shrinker_register(mp); error = xfs_mountfs(mp); if (error) goto out_filestream_unmount; error = xfs_syncd_init(mp); if (error) goto out_unmount; root = igrab(VFS_I(mp->m_rootip)); if (!root) { error = ENOENT; goto out_syncd_stop; } if (is_bad_inode(root)) { error = EINVAL; goto out_syncd_stop; } sb->s_root = d_alloc_root(root); if (!sb->s_root) { error = ENOMEM; goto out_iput; } return 0; out_filestream_unmount: xfs_inode_shrinker_unregister(mp); xfs_filestream_unmount(mp); out_free_sb: xfs_freesb(mp); out_destroy_counters: xfs_icsb_destroy_counters(mp); out_close_devices: xfs_close_devices(mp); out_free_fsname: xfs_free_fsname(mp); kfree(mp); out: return -error; out_iput: iput(root); out_syncd_stop: xfs_syncd_stop(mp); out_unmount: xfs_inode_shrinker_unregister(mp); /* * Blow away any referenced inode in the filestreams cache. * This can and will cause log traffic as inodes go inactive * here. 
*/ xfs_filestream_unmount(mp); XFS_bflush(mp->m_ddev_targp); xfs_unmountfs(mp); goto out_free_sb; } STATIC struct dentry * xfs_fs_mount( struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); } static const struct super_operations xfs_super_operations = { .alloc_inode = xfs_fs_alloc_inode, .destroy_inode = xfs_fs_destroy_inode, .dirty_inode = xfs_fs_dirty_inode, .write_inode = xfs_fs_write_inode, .evict_inode = xfs_fs_evict_inode, .put_super = xfs_fs_put_super, .sync_fs = xfs_fs_sync_fs, .freeze_fs = xfs_fs_freeze, .unfreeze_fs = xfs_fs_unfreeze, .statfs = xfs_fs_statfs, .remount_fs = xfs_fs_remount, .show_options = xfs_fs_show_options, }; static struct file_system_type xfs_fs_type = { .owner = THIS_MODULE, .name = "xfs", .mount = xfs_fs_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; STATIC int __init xfs_init_zones(void) { xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend"); if (!xfs_ioend_zone) goto out; xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, xfs_ioend_zone); if (!xfs_ioend_pool) goto out_destroy_ioend_zone; xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t), "xfs_log_ticket"); if (!xfs_log_ticket_zone) goto out_destroy_ioend_pool; xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), "xfs_bmap_free_item"); if (!xfs_bmap_free_item_zone) goto out_destroy_log_ticket_zone; xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), "xfs_btree_cur"); if (!xfs_btree_cur_zone) goto out_destroy_bmap_free_item_zone; xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), "xfs_da_state"); if (!xfs_da_state_zone) goto out_destroy_btree_cur_zone; xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); if (!xfs_dabuf_zone) goto out_destroy_da_state_zone; xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); if (!xfs_ifork_zone) goto out_destroy_dabuf_zone; 
xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); if (!xfs_trans_zone) goto out_destroy_ifork_zone; xfs_log_item_desc_zone = kmem_zone_init(sizeof(struct xfs_log_item_desc), "xfs_log_item_desc"); if (!xfs_log_item_desc_zone) goto out_destroy_trans_zone; /* * The size of the zone allocated buf log item is the maximum * size possible under XFS. This wastes a little bit of memory, * but it is much faster. */ xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) + (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD) * sizeof(int))), "xfs_buf_item"); if (!xfs_buf_item_zone) goto out_destroy_log_item_desc_zone; xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + ((XFS_EFD_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), "xfs_efd_item"); if (!xfs_efd_zone) goto out_destroy_buf_item_zone; xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + ((XFS_EFI_MAX_FAST_EXTENTS - 1) * sizeof(xfs_extent_t))), "xfs_efi_item"); if (!xfs_efi_zone) goto out_destroy_efd_zone; xfs_inode_zone = kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD, xfs_fs_inode_init_once); if (!xfs_inode_zone) goto out_destroy_efi_zone; xfs_ili_zone = kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", KM_ZONE_SPREAD, NULL); if (!xfs_ili_zone) goto out_destroy_inode_zone; return 0; out_destroy_inode_zone: kmem_zone_destroy(xfs_inode_zone); out_destroy_efi_zone: kmem_zone_destroy(xfs_efi_zone); out_destroy_efd_zone: kmem_zone_destroy(xfs_efd_zone); out_destroy_buf_item_zone: kmem_zone_destroy(xfs_buf_item_zone); out_destroy_log_item_desc_zone: kmem_zone_destroy(xfs_log_item_desc_zone); out_destroy_trans_zone: kmem_zone_destroy(xfs_trans_zone); out_destroy_ifork_zone: kmem_zone_destroy(xfs_ifork_zone); out_destroy_dabuf_zone: kmem_zone_destroy(xfs_dabuf_zone); out_destroy_da_state_zone: kmem_zone_destroy(xfs_da_state_zone); out_destroy_btree_cur_zone: kmem_zone_destroy(xfs_btree_cur_zone); 
out_destroy_bmap_free_item_zone: kmem_zone_destroy(xfs_bmap_free_item_zone); out_destroy_log_ticket_zone: kmem_zone_destroy(xfs_log_ticket_zone); out_destroy_ioend_pool: mempool_destroy(xfs_ioend_pool); out_destroy_ioend_zone: kmem_zone_destroy(xfs_ioend_zone); out: return -ENOMEM; } STATIC void xfs_destroy_zones(void) { kmem_zone_destroy(xfs_ili_zone); kmem_zone_destroy(xfs_inode_zone); kmem_zone_destroy(xfs_efi_zone); kmem_zone_destroy(xfs_efd_zone); kmem_zone_destroy(xfs_buf_item_zone); kmem_zone_destroy(xfs_log_item_desc_zone); kmem_zone_destroy(xfs_trans_zone); kmem_zone_destroy(xfs_ifork_zone); kmem_zone_destroy(xfs_dabuf_zone); kmem_zone_destroy(xfs_da_state_zone); kmem_zone_destroy(xfs_btree_cur_zone); kmem_zone_destroy(xfs_bmap_free_item_zone); kmem_zone_destroy(xfs_log_ticket_zone); mempool_destroy(xfs_ioend_pool); kmem_zone_destroy(xfs_ioend_zone); } STATIC int __init xfs_init_workqueues(void) { /* * max_active is set to 8 to give enough concurency to allow * multiple work operations on each CPU to run. This allows multiple * filesystems to be running sync work concurrently, and scales with * the number of CPUs in the system. 
*/ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); if (!xfs_syncd_wq) return -ENOMEM; return 0; } STATIC void xfs_destroy_workqueues(void) { destroy_workqueue(xfs_syncd_wq); } STATIC int __init init_xfs_fs(void) { int error; printk(KERN_INFO XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n"); xfs_ioend_init(); xfs_dir_startup(); error = xfs_init_zones(); if (error) goto out; error = xfs_init_workqueues(); if (error) goto out_destroy_zones; error = xfs_mru_cache_init(); if (error) goto out_destroy_wq; error = xfs_filestream_init(); if (error) goto out_mru_cache_uninit; error = xfs_buf_init(); if (error) goto out_filestream_uninit; error = xfs_init_procfs(); if (error) goto out_buf_terminate; error = xfs_sysctl_register(); if (error) goto out_cleanup_procfs; vfs_initquota(); error = register_filesystem(&xfs_fs_type); if (error) goto out_sysctl_unregister; return 0; out_sysctl_unregister: xfs_sysctl_unregister(); out_cleanup_procfs: xfs_cleanup_procfs(); out_buf_terminate: xfs_buf_terminate(); out_filestream_uninit: xfs_filestream_uninit(); out_mru_cache_uninit: xfs_mru_cache_uninit(); out_destroy_wq: xfs_destroy_workqueues(); out_destroy_zones: xfs_destroy_zones(); out: return error; } STATIC void __exit exit_xfs_fs(void) { vfs_exitquota(); unregister_filesystem(&xfs_fs_type); xfs_sysctl_unregister(); xfs_cleanup_procfs(); xfs_buf_terminate(); xfs_filestream_uninit(); xfs_mru_cache_uninit(); xfs_destroy_workqueues(); xfs_destroy_zones(); } module_init(init_xfs_fs); module_exit(exit_xfs_fs); MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); MODULE_LICENSE("GPL");
gpl-2.0
yandex/smart
drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
1990
2171
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <subdev/mc.h> struct nv44_mc_priv { struct nouveau_mc base; }; static int nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) { struct nv44_mc_priv *priv; int ret; ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; return 0; } static int nv44_mc_init(struct nouveau_object *object) { struct nv44_mc_priv *priv = (void *)object; u32 tmp = nv_rd32(priv, 0x10020c); nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */ nv_wr32(priv, 0x001700, tmp); nv_wr32(priv, 0x001704, 0); nv_wr32(priv, 0x001708, 0); nv_wr32(priv, 0x00170c, tmp); return nouveau_mc_init(&priv->base); } struct nouveau_oclass nv44_mc_oclass = { .handle = NV_SUBDEV(MC, 0x44), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nv44_mc_ctor, .dtor = _nouveau_mc_dtor, .init = nv44_mc_init, .fini = _nouveau_mc_fini, }, };
gpl-2.0
Pesach85/ph85-p880-kernel-project
drivers/video/console/vgacon.c
3014
41514
/* * linux/drivers/video/vgacon.c -- Low level VGA based console driver * * Created 28 Sep 1997 by Geert Uytterhoeven * * Rewritten by Martin Mares <mj@ucw.cz>, July 1998 * * This file is based on the old console.c, vga.c and vesa_blank.c drivers. * * Copyright (C) 1991, 1992 Linus Torvalds * 1995 Jay Estabrook * * User definable mapping table and font loading by Eugene G. Crosser, * <crosser@average.org> * * Improved loadable font/UTF-8 support by H. Peter Anvin * Feb-Sep 1995 <peter.anvin@linux.org> * * Colour palette handling, by Simon Tatham * 17-Jun-95 <sgt20@cam.ac.uk> * * if 512 char mode is already enabled don't re-enable it, * because it causes screen to flicker, by Mitja Horvat * 5-May-96 <mitja.horvat@guest.arnes.si> * * Use 2 outw instead of 4 outb_p to reduce erroneous text * flashing on RHS of screen during heavy console scrolling . * Oct 1996, Paul Gortmaker. * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/console.h> #include <linux/string.h> #include <linux/kd.h> #include <linux/slab.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/spinlock.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/screen_info.h> #include <video/vga.h> #include <asm/io.h> static DEFINE_SPINLOCK(vga_lock); static int cursor_size_lastfrom; static int cursor_size_lastto; static u32 vgacon_xres; static u32 vgacon_yres; static struct vgastate state; #define BLANK 0x0020 #define CAN_LOAD_EGA_FONTS /* undefine if the user must not do this */ #define CAN_LOAD_PALETTE /* undefine if the user must not do this */ /* You really do _NOT_ want to define this, unless you have buggy * Trident VGA which will resize cursor when moving it between column * 15 & 16. 
If you define this and your VGA is OK, inverse bug will * appear. */ #undef TRIDENT_GLITCH #define VGA_FONTWIDTH 8 /* VGA does not support fontwidths != 8 */ /* * Interface used by the world */ static const char *vgacon_startup(void); static void vgacon_init(struct vc_data *c, int init); static void vgacon_deinit(struct vc_data *c); static void vgacon_cursor(struct vc_data *c, int mode); static int vgacon_switch(struct vc_data *c); static int vgacon_blank(struct vc_data *c, int blank, int mode_switch); static int vgacon_set_palette(struct vc_data *vc, unsigned char *table); static int vgacon_scrolldelta(struct vc_data *c, int lines); static int vgacon_set_origin(struct vc_data *c); static void vgacon_save_screen(struct vc_data *c); static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, int lines); static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); static unsigned long vgacon_uni_pagedir[2]; /* Description of the hardware situation */ static int vga_init_done __read_mostly; static unsigned long vga_vram_base __read_mostly; /* Base of video memory */ static unsigned long vga_vram_end __read_mostly; /* End of video memory */ static unsigned int vga_vram_size __read_mostly; /* Size of video memory */ static u16 vga_video_port_reg __read_mostly; /* Video register select port */ static u16 vga_video_port_val __read_mostly; /* Video register value port */ static unsigned int vga_video_num_columns; /* Number of text columns */ static unsigned int vga_video_num_lines; /* Number of text lines */ static int vga_can_do_color __read_mostly; /* Do we support colors? 
*/ static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */ static unsigned char vga_video_type __read_mostly; /* Card type */ static unsigned char vga_hardscroll_enabled __read_mostly; static unsigned char vga_hardscroll_user_enable __read_mostly = 1; static unsigned char vga_font_is_default = 1; static int vga_vesa_blanked; static int vga_palette_blanked; static int vga_is_gfx; static int vga_512_chars; static int vga_video_font_height; static int vga_scan_lines __read_mostly; static unsigned int vga_rolled_over; static int vgacon_text_mode_force; bool vgacon_text_force(void) { return vgacon_text_mode_force ? true : false; } EXPORT_SYMBOL(vgacon_text_force); static int __init text_mode(char *str) { vgacon_text_mode_force = 1; return 1; } /* force text mode - used by kernel modesetting */ __setup("nomodeset", text_mode); static int __init no_scroll(char *str) { /* * Disabling scrollback is required for the Braillex ib80-piezo * Braille reader made by F.H. Papenmeier (Germany). * Use the "no-scroll" bootflag. */ vga_hardscroll_user_enable = vga_hardscroll_enabled = 0; return 1; } __setup("no-scroll", no_scroll); /* * By replacing the four outb_p with two back to back outw, we can reduce * the window of opportunity to see text mislocated to the RHS of the * console during heavy scrolling activity. However there is the remote * possibility that some pre-dinosaur hardware won't like the back to back * I/O. Since the Xservers get away with it, we should be able to as well. */ static inline void write_vga(unsigned char reg, unsigned int val) { unsigned int v1, v2; unsigned long flags; /* * ddprintk might set the console position from interrupt * handlers, thus the write has to be IRQ-atomic. 
*/ spin_lock_irqsave(&vga_lock, flags); #ifndef SLOW_VGA v1 = reg + (val & 0xff00); v2 = reg + 1 + ((val << 8) & 0xff00); outw(v1, vga_video_port_reg); outw(v2, vga_video_port_reg); #else outb_p(reg, vga_video_port_reg); outb_p(val >> 8, vga_video_port_val); outb_p(reg + 1, vga_video_port_reg); outb_p(val & 0xff, vga_video_port_val); #endif spin_unlock_irqrestore(&vga_lock, flags); } static inline void vga_set_mem_top(struct vc_data *c) { write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } #ifdef CONFIG_VGACON_SOFT_SCROLLBACK /* software scrollback */ static void *vgacon_scrollback; static int vgacon_scrollback_tail; static int vgacon_scrollback_size; static int vgacon_scrollback_rows; static int vgacon_scrollback_cnt; static int vgacon_scrollback_cur; static int vgacon_scrollback_save; static int vgacon_scrollback_restore; static void vgacon_scrollback_init(int pitch) { int rows = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024/pitch; if (vgacon_scrollback) { vgacon_scrollback_cnt = 0; vgacon_scrollback_tail = 0; vgacon_scrollback_cur = 0; vgacon_scrollback_rows = rows - 1; vgacon_scrollback_size = rows * pitch; } } static void vgacon_scrollback_startup(void) { vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT); vgacon_scrollback_init(vga_video_num_columns * 2); } static void vgacon_scrollback_update(struct vc_data *c, int t, int count) { void *p; if (!vgacon_scrollback_size || c->vc_num != fg_console) return; p = (void *) (c->vc_origin + t * c->vc_size_row); while (count--) { scr_memcpyw(vgacon_scrollback + vgacon_scrollback_tail, p, c->vc_size_row); vgacon_scrollback_cnt++; p += c->vc_size_row; vgacon_scrollback_tail += c->vc_size_row; if (vgacon_scrollback_tail >= vgacon_scrollback_size) vgacon_scrollback_tail = 0; if (vgacon_scrollback_cnt > vgacon_scrollback_rows) vgacon_scrollback_cnt = vgacon_scrollback_rows; vgacon_scrollback_cur = vgacon_scrollback_cnt; } } static void vgacon_restore_screen(struct vc_data *c) { 
vgacon_scrollback_save = 0; if (!vga_is_gfx && !vgacon_scrollback_restore) { scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size); vgacon_scrollback_restore = 1; vgacon_scrollback_cur = vgacon_scrollback_cnt; } } static int vgacon_scrolldelta(struct vc_data *c, int lines) { int start, end, count, soff; if (!lines) { c->vc_visible_origin = c->vc_origin; vga_set_mem_top(c); return 1; } if (!vgacon_scrollback) return 1; if (!vgacon_scrollback_save) { vgacon_cursor(c, CM_ERASE); vgacon_save_screen(c); vgacon_scrollback_save = 1; } vgacon_scrollback_restore = 0; start = vgacon_scrollback_cur + lines; end = start + abs(lines); if (start < 0) start = 0; if (start > vgacon_scrollback_cnt) start = vgacon_scrollback_cnt; if (end < 0) end = 0; if (end > vgacon_scrollback_cnt) end = vgacon_scrollback_cnt; vgacon_scrollback_cur = start; count = end - start; soff = vgacon_scrollback_tail - ((vgacon_scrollback_cnt - end) * c->vc_size_row); soff -= count * c->vc_size_row; if (soff < 0) soff += vgacon_scrollback_size; count = vgacon_scrollback_cnt - start; if (count > c->vc_rows) count = c->vc_rows; if (count) { int copysize; int diff = c->vc_rows - count; void *d = (void *) c->vc_origin; void *s = (void *) c->vc_screenbuf; count *= c->vc_size_row; /* how much memory to end of buffer left? */ copysize = min(count, vgacon_scrollback_size - soff); scr_memcpyw(d, vgacon_scrollback + soff, copysize); d += copysize; count -= copysize; if (count) { scr_memcpyw(d, vgacon_scrollback, count); d += count; } if (diff) scr_memcpyw(d, s, diff * c->vc_size_row); } else vgacon_cursor(c, CM_MOVE); return 1; } #else #define vgacon_scrollback_startup(...) do { } while (0) #define vgacon_scrollback_init(...) do { } while (0) #define vgacon_scrollback_update(...) 
do { } while (0) static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) vgacon_scrolldelta(c, 0); } static int vgacon_scrolldelta(struct vc_data *c, int lines) { if (!lines) /* Turn scrollback off */ c->vc_visible_origin = c->vc_origin; else { int margin = c->vc_size_row * 4; int ul, we, p, st; if (vga_rolled_over > (c->vc_scr_end - vga_vram_base) + margin) { ul = c->vc_scr_end - vga_vram_base; we = vga_rolled_over + c->vc_size_row; } else { ul = 0; we = vga_vram_size; } p = (c->vc_visible_origin - vga_vram_base - ul + we) % we + lines * c->vc_size_row; st = (c->vc_origin - vga_vram_base - ul + we) % we; if (st < 2 * margin) margin = 0; if (p < margin) p = 0; if (p > st - margin) p = st; c->vc_visible_origin = vga_vram_base + (p + ul) % we; } vga_set_mem_top(c); return 1; } #endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ static const char *vgacon_startup(void) { const char *display_desc = NULL; u16 saved1, saved2; volatile u16 *p; if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB || screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { no_vga: #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; return conswitchp->con_startup(); #else return NULL; #endif } /* boot_params.screen_info initialized? 
*/ if ((screen_info.orig_video_mode == 0) && (screen_info.orig_video_lines == 0) && (screen_info.orig_video_cols == 0)) goto no_vga; /* VGA16 modes are not handled by VGACON */ if ((screen_info.orig_video_mode == 0x0D) || /* 320x200/4 */ (screen_info.orig_video_mode == 0x0E) || /* 640x200/4 */ (screen_info.orig_video_mode == 0x10) || /* 640x350/4 */ (screen_info.orig_video_mode == 0x12) || /* 640x480/4 */ (screen_info.orig_video_mode == 0x6A)) /* 800x600/4 (VESA) */ goto no_vga; vga_video_num_lines = screen_info.orig_video_lines; vga_video_num_columns = screen_info.orig_video_cols; state.vgabase = NULL; if (screen_info.orig_video_mode == 7) { /* Monochrome display */ vga_vram_base = 0xb0000; vga_video_port_reg = VGA_CRT_IM; vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = { .name = "ega", .start = 0x3B0, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource mda1_console_resource = { .name = "mda", .start = 0x3B0, .end = 0x3BB }; static struct resource mda2_console_resource = { .name = "mda", .start = 0x3BF, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; request_resource(&ioport_resource, &mda1_console_resource); request_resource(&ioport_resource, &mda2_console_resource); vga_video_font_height = 14; } } else { /* If not, it is color. 
*/ vga_can_do_color = 1; vga_vram_base = 0xb8000; vga_video_port_reg = VGA_CRT_IC; vga_video_port_val = VGA_CRT_DC; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { int i; vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { static struct resource ega_console_resource = { .name = "ega", .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource vga_console_resource = { .name = "vga+", .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, &vga_console_resource); #ifdef VGA_CAN_DO_64KB /* * get 64K rather than 32K of video RAM. * This doesn't actually work on all "VGA" * controllers (it seems like setting MM=01 * and COE=1 isn't necessarily a good idea) */ vga_vram_base = 0xa0000; vga_vram_size = 0x10000; outb_p(6, VGA_GFX_I); outb_p(6, VGA_GFX_D); #endif /* * Normalise the palette registers, to point * the 16 screen colours to the first 16 * DAC entries. */ for (i = 0; i < 16; i++) { inb_p(VGA_IS1_RC); outb_p(i, VGA_ATT_W); outb_p(i, VGA_ATT_W); } outb_p(0x20, VGA_ATT_W); /* * Now set the DAC registers back to their * default values */ for (i = 0; i < 16; i++) { outb_p(color_table[i], VGA_PEL_IW); outb_p(default_red[i], VGA_PEL_D); outb_p(default_grn[i], VGA_PEL_D); outb_p(default_blu[i], VGA_PEL_D); } } } else { static struct resource cga_console_resource = { .name = "cga", .start = 0x3D4, .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; request_resource(&ioport_resource, &cga_console_resource); vga_video_font_height = 8; } } vga_vram_base = VGA_MAP_MEM(vga_vram_base, vga_vram_size); vga_vram_end = vga_vram_base + vga_vram_size; /* * Find out if there is a graphics card present. * Are there smarter methods around? 
*/ p = (volatile u16 *) vga_vram_base; saved1 = scr_readw(p); saved2 = scr_readw(p + 1); scr_writew(0xAA55, p); scr_writew(0x55AA, p + 1); if (scr_readw(p) != 0xAA55 || scr_readw(p + 1) != 0x55AA) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(0x55AA, p); scr_writew(0xAA55, p + 1); if (scr_readw(p) != 0x55AA || scr_readw(p + 1) != 0xAA55) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(saved1, p); scr_writew(saved2, p + 1); if (vga_video_type == VIDEO_TYPE_EGAC || vga_video_type == VIDEO_TYPE_VGAC || vga_video_type == VIDEO_TYPE_EGAM) { vga_hardscroll_enabled = vga_hardscroll_user_enable; vga_default_font_height = screen_info.orig_video_points; vga_video_font_height = screen_info.orig_video_points; /* This may be suboptimal but is a safe bet - go with it */ vga_scan_lines = vga_video_font_height * vga_video_num_lines; } vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; if (!vga_init_done) { vgacon_scrollback_startup(); vga_init_done = 1; } return display_desc; } static void vgacon_init(struct vc_data *c, int init) { unsigned long p; /* * We cannot be loaded as a module, therefore init is always 1, * but vgacon_init can be called more than once, and init will * not be 1. 
*/ c->vc_can_do_color = vga_can_do_color; /* set dimensions manually if init != 0 since vc_resize() will fail */ if (init) { c->vc_cols = vga_video_num_columns; c->vc_rows = vga_video_num_lines; } else vc_resize(c, vga_video_num_columns, vga_video_num_lines); c->vc_scan_lines = vga_scan_lines; c->vc_font.height = vga_video_font_height; c->vc_complement_mask = 0x7700; if (vga_512_chars) c->vc_hi_font_mask = 0x0800; p = *c->vc_uni_pagedir_loc; if (c->vc_uni_pagedir_loc == &c->vc_uni_pagedir || !--c->vc_uni_pagedir_loc[1]) con_free_unimap(c); c->vc_uni_pagedir_loc = vgacon_uni_pagedir; vgacon_uni_pagedir[1]++; if (!vgacon_uni_pagedir[0] && p) con_set_default_unimap(c); /* Only set the default if the user didn't deliberately override it */ if (global_cursor_default == -1) global_cursor_default = !(screen_info.flags & VIDEO_FLAGS_NOCURSOR); } static void vgacon_deinit(struct vc_data *c) { /* When closing the active console, reset video origin */ if (CON_IS_VISIBLE(c)) { c->vc_visible_origin = vga_vram_base; vga_set_mem_top(c); } if (!--vgacon_uni_pagedir[1]) con_free_unimap(c); c->vc_uni_pagedir_loc = &c->vc_uni_pagedir; con_set_default_unimap(c); } static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity, u8 blink, u8 underline, u8 reverse, u8 italic) { u8 attr = color; if (vga_can_do_color) { if (italic) attr = (attr & 0xF0) | c->vc_itcolor; else if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; else if (intensity == 0) attr = (attr & 0xf0) | c->vc_halfcolor; } if (reverse) attr = ((attr) & 0x88) | ((((attr) >> 4) | ((attr) << 4)) & 0x77); if (blink) attr ^= 0x80; if (intensity == 2) attr ^= 0x08; if (!vga_can_do_color) { if (italic) attr = (attr & 0xF8) | 0x02; else if (underline) attr = (attr & 0xf8) | 0x01; else if (intensity == 0) attr = (attr & 0xf0) | 0x08; } return attr; } static void vgacon_invert_region(struct vc_data *c, u16 * p, int count) { int col = vga_can_do_color; while (count--) { u16 a = scr_readw(p); if (col) a = ((a) & 0x88ff) | 
(((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); else a ^= ((a & 0x0700) == 0x0100) ? 0x7000 : 0x7700; scr_writew(a, p++); } } static void vgacon_set_cursor_size(int xpos, int from, int to) { unsigned long flags; int curs, cure; #ifdef TRIDENT_GLITCH if (xpos < 16) from--, to--; #endif if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto)) return; cursor_size_lastfrom = from; cursor_size_lastto = to; spin_lock_irqsave(&vga_lock, flags); if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); curs = inb_p(vga_video_port_val); outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); cure = inb_p(vga_video_port_val); } else { curs = 0; cure = 0; } curs = (curs & 0xc0) | from; cure = (cure & 0xe0) | to; outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); outb_p(curs, vga_video_port_val); outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); outb_p(cure, vga_video_port_val); spin_unlock_irqrestore(&vga_lock, flags); } static void vgacon_cursor(struct vc_data *c, int mode) { if (c->vc_mode != KD_TEXT) return; vgacon_restore_screen(c); switch (mode) { case CM_ERASE: write_vga(14, (c->vc_pos - vga_vram_base) / 2); if (vga_video_type >= VIDEO_TYPE_VGAC) vgacon_set_cursor_size(c->vc_x, 31, 30); else vgacon_set_cursor_size(c->vc_x, 31, 31); break; case CM_MOVE: case CM_DRAW: write_vga(14, (c->vc_pos - vga_vram_base) / 2); switch (c->vc_cursor_type & 0x0f) { case CUR_UNDERLINE: vgacon_set_cursor_size(c->vc_x, c->vc_font.height - (c->vc_font.height < 10 ? 2 : 3), c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_TWO_THIRDS: vgacon_set_cursor_size(c->vc_x, c->vc_font.height / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_THIRD: vgacon_set_cursor_size(c->vc_x, (c->vc_font.height * 2) / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_HALF: vgacon_set_cursor_size(c->vc_x, c->vc_font.height / 2, c->vc_font.height - (c->vc_font.height < 10 ? 
1 : 2)); break; case CUR_NONE: if (vga_video_type >= VIDEO_TYPE_VGAC) vgacon_set_cursor_size(c->vc_x, 31, 30); else vgacon_set_cursor_size(c->vc_x, 31, 31); break; default: vgacon_set_cursor_size(c->vc_x, 1, c->vc_font.height); break; } break; } } static int vgacon_doresize(struct vc_data *c, unsigned int width, unsigned int height) { unsigned long flags; unsigned int scanlines = height * c->vc_font.height; u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; spin_lock_irqsave(&vga_lock, flags); vgacon_xres = width * VGA_FONTWIDTH; vgacon_yres = height * c->vc_font.height; if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg); max_scan = inb_p(vga_video_port_val); if (max_scan & 0x80) scanlines <<= 1; outb_p(VGA_CRTC_MODE, vga_video_port_reg); mode = inb_p(vga_video_port_val); if (mode & 0x04) scanlines >>= 1; scanlines -= 1; scanlines_lo = scanlines & 0xff; outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg); r7 = inb_p(vga_video_port_val) & ~0x42; if (scanlines & 0x100) r7 |= 0x02; if (scanlines & 0x200) r7 |= 0x40; /* deprotect registers */ outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); vsync_end = inb_p(vga_video_port_val); outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); outb_p(vsync_end & ~0x80, vga_video_port_val); } outb_p(VGA_CRTC_H_DISP, vga_video_port_reg); outb_p(width - 1, vga_video_port_val); outb_p(VGA_CRTC_OFFSET, vga_video_port_reg); outb_p(width >> 1, vga_video_port_val); if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_V_DISP_END, vga_video_port_reg); outb_p(scanlines_lo, vga_video_port_val); outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg); outb_p(r7,vga_video_port_val); /* reprotect registers */ outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); outb_p(vsync_end, vga_video_port_val); } spin_unlock_irqrestore(&vga_lock, flags); return 0; } static int vgacon_switch(struct vc_data *c) { int x = c->vc_cols * VGA_FONTWIDTH; int y = c->vc_rows * c->vc_font.height; int rows = screen_info.orig_video_lines * 
vga_default_font_height/ c->vc_font.height; /* * We need to save screen size here as it's the only way * we can spot the screen has been resized and we need to * set size of freshly allocated screens ourselves. */ vga_video_num_columns = c->vc_cols; vga_video_num_lines = c->vc_rows; /* We can only copy out the size of the video buffer here, * otherwise we get into VGA BIOS */ if (!vga_is_gfx) { scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size); if ((vgacon_xres != x || vgacon_yres != y) && (!(vga_video_num_columns % 2) && vga_video_num_columns <= screen_info.orig_video_cols && vga_video_num_lines <= rows)) vgacon_doresize(c, c->vc_cols, c->vc_rows); } vgacon_scrollback_init(c->vc_size_row); return 0; /* Redrawing not needed */ } static void vga_set_palette(struct vc_data *vc, unsigned char *table) { int i, j; vga_w(state.vgabase, VGA_PEL_MSK, 0xff); for (i = j = 0; i < 16; i++) { vga_w(state.vgabase, VGA_PEL_IW, table[i]); vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); } } static int vgacon_set_palette(struct vc_data *vc, unsigned char *table) { #ifdef CAN_LOAD_PALETTE if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked || !CON_IS_VISIBLE(vc)) return -EINVAL; vga_set_palette(vc, table); return 0; #else return -EINVAL; #endif } /* structure holding original VGA register settings */ static struct { unsigned char SeqCtrlIndex; /* Sequencer Index reg. */ unsigned char CrtCtrlIndex; /* CRT-Contr. Index reg. 
*/ unsigned char CrtMiscIO; /* Miscellaneous register */ unsigned char HorizontalTotal; /* CRT-Controller:00h */ unsigned char HorizDisplayEnd; /* CRT-Controller:01h */ unsigned char StartHorizRetrace; /* CRT-Controller:04h */ unsigned char EndHorizRetrace; /* CRT-Controller:05h */ unsigned char Overflow; /* CRT-Controller:07h */ unsigned char StartVertRetrace; /* CRT-Controller:10h */ unsigned char EndVertRetrace; /* CRT-Controller:11h */ unsigned char ModeControl; /* CRT-Controller:17h */ unsigned char ClockingMode; /* Seq-Controller:01h */ } vga_state; static void vga_vesa_blank(struct vgastate *state, int mode) { /* save original values of VGA controller registers */ if (!vga_vesa_blanked) { spin_lock_irq(&vga_lock); vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I); vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); spin_unlock_irq(&vga_lock); outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ vga_state.HorizontalTotal = inb_p(vga_video_port_val); outb_p(0x01, vga_video_port_reg); /* HorizDisplayEnd */ vga_state.HorizDisplayEnd = inb_p(vga_video_port_val); outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ vga_state.StartHorizRetrace = inb_p(vga_video_port_val); outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ vga_state.EndHorizRetrace = inb_p(vga_video_port_val); outb_p(0x07, vga_video_port_reg); /* Overflow */ vga_state.Overflow = inb_p(vga_video_port_val); outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ vga_state.StartVertRetrace = inb_p(vga_video_port_val); outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ vga_state.EndVertRetrace = inb_p(vga_video_port_val); outb_p(0x17, vga_video_port_reg); /* ModeControl */ vga_state.ModeControl = inb_p(vga_video_port_val); vga_state.ClockingMode = vga_rseq(state->vgabase, VGA_SEQ_CLOCK_MODE); } /* assure that video is enabled */ /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ spin_lock_irq(&vga_lock); 
vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); /* test for vertical retrace in process.... */ if ((vga_state.CrtMiscIO & 0x80) == 0x80) vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO & 0xEF); /* * Set <End of vertical retrace> to minimum (0) and * <Start of vertical Retrace> to maximum (incl. overflow) * Result: turn off vertical sync (VSync) pulse. */ if (mode & VESA_VSYNC_SUSPEND) { outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ outb_p(0xff, vga_video_port_val); /* maximum value */ outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ outb_p(0x40, vga_video_port_val); /* minimum (bits 0..3) */ outb_p(0x07, vga_video_port_reg); /* Overflow */ outb_p(vga_state.Overflow | 0x84, vga_video_port_val); /* bits 9,10 of vert. retrace */ } if (mode & VESA_HSYNC_SUSPEND) { /* * Set <End of horizontal retrace> to minimum (0) and * <Start of horizontal Retrace> to maximum * Result: turn off horizontal sync (HSync) pulse. */ outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ outb_p(0xff, vga_video_port_val); /* maximum */ outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ outb_p(0x00, vga_video_port_val); /* minimum (0) */ } /* restore both index registers */ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); spin_unlock_irq(&vga_lock); } static void vga_vesa_unblank(struct vgastate *state) { /* restore original values of VGA controller registers */ spin_lock_irq(&vga_lock); vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ outb_p(vga_state.HorizontalTotal, vga_video_port_val); outb_p(0x01, vga_video_port_reg); /* HorizDisplayEnd */ outb_p(vga_state.HorizDisplayEnd, vga_video_port_val); outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ outb_p(vga_state.StartHorizRetrace, vga_video_port_val); outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ outb_p(vga_state.EndHorizRetrace, 
vga_video_port_val); outb_p(0x07, vga_video_port_reg); /* Overflow */ outb_p(vga_state.Overflow, vga_video_port_val); outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ outb_p(vga_state.StartVertRetrace, vga_video_port_val); outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ outb_p(vga_state.EndVertRetrace, vga_video_port_val); outb_p(0x17, vga_video_port_reg); /* ModeControl */ outb_p(vga_state.ModeControl, vga_video_port_val); /* ClockingMode */ vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode); /* restore index/control registers */ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); spin_unlock_irq(&vga_lock); } static void vga_pal_blank(struct vgastate *state) { int i; vga_w(state->vgabase, VGA_PEL_MSK, 0xff); for (i = 0; i < 16; i++) { vga_w(state->vgabase, VGA_PEL_IW, i); vga_w(state->vgabase, VGA_PEL_D, 0); vga_w(state->vgabase, VGA_PEL_D, 0); vga_w(state->vgabase, VGA_PEL_D, 0); } } static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) { switch (blank) { case 0: /* Unblank */ if (vga_vesa_blanked) { vga_vesa_unblank(&state); vga_vesa_blanked = 0; } if (vga_palette_blanked) { vga_set_palette(c, color_table); vga_palette_blanked = 0; return 0; } vga_is_gfx = 0; /* Tell console.c that it has to restore the screen itself */ return 1; case 1: /* Normal blanking */ case -1: /* Obsolete */ if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) { vga_pal_blank(&state); vga_palette_blanked = 1; return 0; } vgacon_set_origin(c); scr_memsetw((void *) vga_vram_base, BLANK, c->vc_screenbuf_size); if (mode_switch) vga_is_gfx = 1; return 1; default: /* VESA blanking */ if (vga_video_type == VIDEO_TYPE_VGAC) { vga_vesa_blank(&state, blank - 1); vga_vesa_blanked = blank; } return 0; } } /* * PIO_FONT support. * * The font loading code goes back to the codepage package by * Joel Hoffman (joel@wam.umd.edu). (He reports that the original * reference is: "From: p. 
307 of _Programmer's Guide to PC & PS/2 * Video Systems_ by Richard Wilton. 1987. Microsoft Press".) * * Change for certain monochrome monitors by Yury Shevchuck * (sizif@botik.yaroslavl.su). */ #ifdef CAN_LOAD_EGA_FONTS #define colourmap 0xa0000 /* Pauline Middelink <middelin@polyware.iaf.nl> reports that we should use 0xA0000 for the bwmap as well.. */ #define blackwmap 0xa0000 #define cmapsz 8192 static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512) { unsigned short video_port_status = vga_video_port_reg + 6; int font_select = 0x00, beg, i; char *charmap; if (vga_video_type != VIDEO_TYPE_EGAM) { charmap = (char *) VGA_MAP_MEM(colourmap, 0); beg = 0x0e; #ifdef VGA_CAN_DO_64KB if (vga_video_type == VIDEO_TYPE_VGAC) beg = 0x06; #endif } else { charmap = (char *) VGA_MAP_MEM(blackwmap, 0); beg = 0x0a; } #ifdef BROKEN_GRAPHICS_PROGRAMS /* * All fonts are loaded in slot 0 (0:1 for 512 ch) */ if (!arg) return -EINVAL; /* Return to default font not supported */ vga_font_is_default = 0; font_select = ch512 ? 0x04 : 0x00; #else /* * The default font is kept in slot 0 and is never touched. * A custom font is loaded in slot 2 (256 ch) or 2:3 (512 ch) */ if (set) { vga_font_is_default = !arg; if (!arg) ch512 = 0; /* Default font is always 256 */ font_select = arg ? (ch512 ? 
0x0e : 0x0a) : 0x00; } if (!vga_font_is_default) charmap += 4 * cmapsz; #endif spin_lock_irq(&vga_lock); /* First, the Sequencer */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); /* CPU writes only to map 2 */ vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x04); /* Sequential addressing */ vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, 0x07); /* Clear synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x03); /* Now, the graphics controller, select map 2 */ vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x02); /* disable odd-even addressing */ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); /* map start at A000:0000 */ vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); spin_unlock_irq(&vga_lock); if (arg) { if (set) for (i = 0; i < cmapsz; i++) vga_writeb(arg[i], charmap + i); else for (i = 0; i < cmapsz; i++) arg[i] = vga_readb(charmap + i); /* * In 512-character mode, the character map is not contiguous if * we want to remain EGA compatible -- which we do */ if (ch512) { charmap += 2 * cmapsz; arg += cmapsz; if (set) for (i = 0; i < cmapsz; i++) vga_writeb(arg[i], charmap + i); else for (i = 0; i < cmapsz; i++) arg[i] = vga_readb(charmap + i); } } spin_lock_irq(&vga_lock); /* First, the sequencer, Synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); /* CPU writes to maps 0 and 1 */ vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x03); /* odd-even addressing */ vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, 0x03); /* Character Map Select */ if (set) vga_wseq(state->vgabase, VGA_SEQ_CHARACTER_MAP, font_select); /* clear synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x03); /* Now, the graphics controller, select map 0 for CPU */ vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x00); /* enable even-odd addressing */ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x10); /* map starts at b800:0 or b000:0 */ vga_wgfx(state->vgabase, VGA_GFX_MISC, beg); /* if 512 char mode is already enabled don't re-enable it. 
*/ if ((set) && (ch512 != vga_512_chars)) { /* attribute controller */ for (i = 0; i < MAX_NR_CONSOLES; i++) { struct vc_data *c = vc_cons[i].d; if (c && c->vc_sw == &vga_con) c->vc_hi_font_mask = ch512 ? 0x0800 : 0; } vga_512_chars = ch512; /* 256-char: enable intensity bit 512-char: disable intensity bit */ inb_p(video_port_status); /* clear address flip-flop */ /* color plane enable register */ vga_wattr(state->vgabase, VGA_ATC_PLANE_ENABLE, ch512 ? 0x07 : 0x0f); /* Wilton (1987) mentions the following; I don't know what it means, but it works, and it appears necessary */ inb_p(video_port_status); vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); } spin_unlock_irq(&vga_lock); return 0; } /* * Adjust the screen to fit a font of a certain height */ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) { unsigned char ovr, vde, fsr; int rows, maxscan, i; rows = vc->vc_scan_lines / fontheight; /* Number of video rows we end up with */ maxscan = rows * fontheight - 1; /* Scan lines to actually display-1 */ /* Reprogram the CRTC for the new font size Note: the attempt to read the overflow register will fail on an EGA, but using 0xff for the previous value appears to be OK for EGA text modes in the range 257-512 scan lines, so I guess we don't need to worry about it. The same applies for the spill bits in the font size and cursor registers; they are write-only on EGA, but it appears that they are all don't care bits on EGA, so I guess it doesn't matter. 
*/ spin_lock_irq(&vga_lock); outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ ovr = inb_p(vga_video_port_val); outb_p(0x09, vga_video_port_reg); /* Font size register */ fsr = inb_p(vga_video_port_val); spin_unlock_irq(&vga_lock); vde = maxscan & 0xff; /* Vertical display end reg */ ovr = (ovr & 0xbd) + /* Overflow register */ ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ spin_lock_irq(&vga_lock); outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ outb_p(ovr, vga_video_port_val); outb_p(0x09, vga_video_port_reg); /* Font size */ outb_p(fsr, vga_video_port_val); outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ outb_p(vde, vga_video_port_val); spin_unlock_irq(&vga_lock); vga_video_font_height = fontheight; for (i = 0; i < MAX_NR_CONSOLES; i++) { struct vc_data *c = vc_cons[i].d; if (c && c->vc_sw == &vga_con) { if (CON_IS_VISIBLE(c)) { /* void size to cause regs to be rewritten */ cursor_size_lastfrom = 0; cursor_size_lastto = 0; c->vc_sw->con_cursor(c, CM_DRAW); } c->vc_font.height = fontheight; vc_resize(c, 0, rows); /* Adjust console size */ } } return 0; } static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigned flags) { unsigned charcount = font->charcount; int rc; if (vga_video_type < VIDEO_TYPE_EGAM) return -EINVAL; if (font->width != VGA_FONTWIDTH || (charcount != 256 && charcount != 512)) return -EINVAL; rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512); if (rc) return rc; if (!(flags & KD_FONT_FLAG_DONT_RECALC)) rc = vgacon_adjust_height(c, font->height); return rc; } static int vgacon_font_get(struct vc_data *c, struct console_font *font) { if (vga_video_type < VIDEO_TYPE_EGAM) return -EINVAL; font->width = VGA_FONTWIDTH; font->height = c->vc_font.height; font->charcount = vga_512_chars ? 
512 : 256; if (!font->data) return 0; return vgacon_do_font_op(&state, font->data, 0, vga_512_chars); } #else #define vgacon_font_set NULL #define vgacon_font_get NULL #endif static int vgacon_resize(struct vc_data *c, unsigned int width, unsigned int height, unsigned int user) { if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ c->vc_font.height) /* let svgatextmode tinker with video timings and return success */ return (user) ? 0 : -EINVAL; if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */ vgacon_doresize(c, width, height); return 0; } static int vgacon_set_origin(struct vc_data *c) { if (vga_is_gfx || /* We don't play origin tricks in graphic modes */ (console_blanked && !vga_palette_blanked)) /* Nor we write to blanked screens */ return 0; c->vc_origin = c->vc_visible_origin = vga_vram_base; vga_set_mem_top(c); vga_rolled_over = 0; return 1; } static void vgacon_save_screen(struct vc_data *c) { static int vga_bootup_console = 0; if (!vga_bootup_console) { /* This is a gross hack, but here is the only place we can * set bootup console parameters without messing up generic * console initialization routines. */ vga_bootup_console = 1; c->vc_x = screen_info.orig_x; c->vc_y = screen_info.orig_y; } /* We can't copy in more than the size of the video buffer, * or we'll be copying in VGA BIOS */ if (!vga_is_gfx) scr_memcpyw((u16 *) c->vc_screenbuf, (u16 *) c->vc_origin, c->vc_screenbuf_size > vga_vram_size ? 
vga_vram_size : c->vc_screenbuf_size); } static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) { unsigned long oldo; unsigned int delta; if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT) return 0; if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2) return 0; vgacon_restore_screen(c); oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_base; vga_rolled_over = oldo - vga_vram_base; } else c->vc_origin += delta; scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - delta), c->vc_video_erase_char, delta); } else { if (oldo - delta < vga_vram_base) { scr_memmovew((u16 *) (vga_vram_end - c->vc_screenbuf_size + delta), (u16 *) oldo, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; } else c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, delta); } c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; c->vc_visible_origin = c->vc_origin; vga_set_mem_top(c); c->vc_pos = (c->vc_pos - oldo) + c->vc_origin; return 1; } /* * The console `switch' structure for the VGA based console */ static int vgacon_dummy(struct vc_data *c) { return 0; } #define DUMMY (void *) vgacon_dummy const struct consw vga_con = { .owner = THIS_MODULE, .con_startup = vgacon_startup, .con_init = vgacon_init, .con_deinit = vgacon_deinit, .con_clear = DUMMY, .con_putc = DUMMY, .con_putcs = DUMMY, .con_cursor = vgacon_cursor, .con_scroll = vgacon_scroll, .con_bmove = DUMMY, .con_switch = vgacon_switch, .con_blank = vgacon_blank, .con_font_set = vgacon_font_set, .con_font_get = vgacon_font_get, .con_resize = vgacon_resize, .con_set_palette = vgacon_set_palette, .con_scrolldelta = vgacon_scrolldelta, 
.con_set_origin = vgacon_set_origin, .con_save_screen = vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, }; MODULE_LICENSE("GPL");
gpl-2.0
faux123/flo_nexus7
virt/kvm/iommu.c
3014
8475
/* * Copyright (c) 2006, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Copyright (C) 2006-2008 Intel Corporation * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Author: Allen M. Kay <allen.m.kay@intel.com> * Author: Weidong Han <weidong.han@intel.com> * Author: Ben-Ami Yassour <benami@il.ibm.com> */ #include <linux/list.h> #include <linux/kvm_host.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/dmar.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> static bool allow_unsafe_assigned_interrupts; module_param_named(allow_unsafe_assigned_interrupts, allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(allow_unsafe_assigned_interrupts, "Enable device assignment on platforms without interrupt remapping support."); static int kvm_iommu_unmap_memslots(struct kvm *kvm); static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long size) { gfn_t end_gfn; pfn_t pfn; pfn = gfn_to_pfn_memslot(kvm, slot, gfn); end_gfn = gfn + (size >> PAGE_SHIFT); gfn += 1; if (is_error_pfn(pfn)) return pfn; while (gfn < end_gfn) gfn_to_pfn_memslot(kvm, slot, gfn++); return pfn; } int kvm_iommu_map_pages(struct kvm *kvm, struct 
kvm_memory_slot *slot) { gfn_t gfn, end_gfn; pfn_t pfn; int r = 0; struct iommu_domain *domain = kvm->arch.iommu_domain; int flags; /* check if iommu exists and in use */ if (!domain) return 0; gfn = slot->base_gfn; end_gfn = gfn + slot->npages; flags = IOMMU_READ | IOMMU_WRITE; if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY) flags |= IOMMU_CACHE; while (gfn < end_gfn) { unsigned long page_size; /* Check if already mapped */ if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { gfn += 1; continue; } /* Get the page size we could use to map */ page_size = kvm_host_page_size(kvm, gfn); /* Make sure the page_size does not exceed the memslot */ while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn) page_size >>= 1; /* Make sure gfn is aligned to the page size we want to map */ while ((gfn << PAGE_SHIFT) & (page_size - 1)) page_size >>= 1; /* * Pin all pages we are about to map in memory. This is * important because we unmap and unpin in 4kb steps later. */ pfn = kvm_pin_pages(kvm, slot, gfn, page_size); if (is_error_pfn(pfn)) { gfn += 1; continue; } /* Map into IO address space */ r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), page_size, flags); if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); goto unmap_pages; } gfn += page_size >> PAGE_SHIFT; } return 0; unmap_pages: kvm_iommu_put_pages(kvm, slot->base_gfn, gfn); return r; } static int kvm_iommu_map_memslots(struct kvm *kvm) { int idx, r = 0; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { r = kvm_iommu_map_pages(kvm, memslot); if (r) break; } srcu_read_unlock(&kvm->srcu, idx); return r; } int kvm_assign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct pci_dev *pdev = NULL; struct iommu_domain *domain = kvm->arch.iommu_domain; int r, last_flags; /* check if iommu exists and in use */ if (!domain) return 0; pdev = 
assigned_dev->dev; if (pdev == NULL) return -ENODEV; r = iommu_attach_device(domain, &pdev->dev); if (r) { printk(KERN_ERR "assign device %x:%x:%x.%x failed", pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); return r; } last_flags = kvm->arch.iommu_flags; if (iommu_domain_has_cap(kvm->arch.iommu_domain, IOMMU_CAP_CACHE_COHERENCY)) kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY; /* Check if need to update IOMMU page table for guest memory */ if ((last_flags ^ kvm->arch.iommu_flags) == KVM_IOMMU_CACHE_COHERENCY) { kvm_iommu_unmap_memslots(kvm); r = kvm_iommu_map_memslots(kvm); if (r) goto out_unmap; } pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; printk(KERN_DEBUG "assign device %x:%x:%x.%x\n", assigned_dev->host_segnr, assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); return 0; out_unmap: kvm_iommu_unmap_memslots(kvm); return r; } int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n", assigned_dev->host_segnr, assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); return 0; } int kvm_iommu_map_guest(struct kvm *kvm) { int r; if (!iommu_present(&pci_bus_type)) { printk(KERN_ERR "%s: iommu not found\n", __func__); return -ENODEV; } mutex_lock(&kvm->slots_lock); kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type, 0); if (!kvm->arch.iommu_domain) { r = -ENOMEM; goto out_unlock; } if (!allow_unsafe_assigned_interrupts && !iommu_domain_has_cap(kvm->arch.iommu_domain, IOMMU_CAP_INTR_REMAP)) { printk(KERN_WARNING "%s: No interrupt remapping support," " disallowing 
device assignment." " Re-enble with \"allow_unsafe_assigned_interrupts=1\"" " module option.\n", __func__); iommu_domain_free(kvm->arch.iommu_domain); kvm->arch.iommu_domain = NULL; r = -EPERM; goto out_unlock; } r = kvm_iommu_map_memslots(kvm); if (r) kvm_iommu_unmap_memslots(kvm); out_unlock: mutex_unlock(&kvm->slots_lock); return r; } static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) { unsigned long i; for (i = 0; i < npages; ++i) kvm_release_pfn_clean(pfn + i); } static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) { struct iommu_domain *domain; gfn_t end_gfn, gfn; pfn_t pfn; u64 phys; domain = kvm->arch.iommu_domain; end_gfn = base_gfn + npages; gfn = base_gfn; /* check if iommu exists and in use */ if (!domain) return; while (gfn < end_gfn) { unsigned long unmap_pages; size_t size; /* Get physical address */ phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); pfn = phys >> PAGE_SHIFT; /* Unmap address from IO address space */ size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); unmap_pages = 1ULL << get_order(size); /* Unpin all pages we just unmapped to not leak any memory */ kvm_unpin_pages(kvm, pfn, unmap_pages); gfn += unmap_pages; } } void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); } static int kvm_iommu_unmap_memslots(struct kvm *kvm) { int idx; struct kvm_memslots *slots; struct kvm_memory_slot *memslot; idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) kvm_iommu_unmap_pages(kvm, memslot); srcu_read_unlock(&kvm->srcu, idx); return 0; } int kvm_iommu_unmap_guest(struct kvm *kvm) { struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; mutex_lock(&kvm->slots_lock); kvm_iommu_unmap_memslots(kvm); kvm->arch.iommu_domain = NULL; mutex_unlock(&kvm->slots_lock); iommu_domain_free(domain); return 0; }
gpl-2.0
TeamBliss-Devices/android_kernel_asus_grouper
drivers/media/video/tda9840.c
3270
5787
/* tda9840 - i2c-driver for the tda9840 by SGS Thomson Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de> Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl> The tda9840 is a stereo/dual sound processor with digital identification. It can be found at address 0x84 on the i2c-bus. For detailed informations download the specifications directly from SGS Thomson at http://www.st.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/i2c.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_DESCRIPTION("tda9840 driver"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); #define SWITCH 0x00 #define LEVEL_ADJUST 0x02 #define STEREO_ADJUST 0x03 #define TEST 0x04 #define TDA9840_SET_MUTE 0x00 #define TDA9840_SET_MONO 0x10 #define TDA9840_SET_STEREO 0x2a #define TDA9840_SET_LANG1 0x12 #define TDA9840_SET_LANG2 0x1e #define TDA9840_SET_BOTH 0x1a #define TDA9840_SET_BOTH_R 0x16 #define TDA9840_SET_EXTERNAL 0x7a static void tda9840_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (i2c_smbus_write_byte_data(client, reg, val)) v4l2_dbg(1, debug, sd, "error writing %02x to %02x\n", val, reg); } static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t) { int byte; if (t->index) return -EINVAL; switch (t->audmode) { case V4L2_TUNER_MODE_STEREO: byte = TDA9840_SET_STEREO; break; case V4L2_TUNER_MODE_LANG1_LANG2: byte = TDA9840_SET_BOTH; break; case V4L2_TUNER_MODE_LANG1: byte = TDA9840_SET_LANG1; break; case V4L2_TUNER_MODE_LANG2: byte = TDA9840_SET_LANG2; break; default: byte = TDA9840_SET_MONO; break; } v4l2_dbg(1, debug, sd, "TDA9840_SWITCH: 0x%02x\n", byte); tda9840_write(sd, SWITCH, byte); return 0; } static int tda9840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 byte; t->rxsubchans = V4L2_TUNER_SUB_MONO; if (1 != i2c_master_recv(client, &byte, 1)) { v4l2_dbg(1, debug, sd, "i2c_master_recv() failed\n"); return -EIO; } if (byte & 0x80) { v4l2_dbg(1, debug, sd, "TDA9840_DETECT: register contents invalid\n"); return -EINVAL; } v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte); switch (byte & 0x60) { case 0x00: t->rxsubchans = 
V4L2_TUNER_SUB_MONO; break; case 0x20: t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; case 0x40: t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; break; default: /* Incorrect detect */ t->rxsubchans = V4L2_TUNER_MODE_MONO; break; } return 0; } static int tda9840_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TDA9840, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops tda9840_core_ops = { .g_chip_ident = tda9840_g_chip_ident, }; static const struct v4l2_subdev_tuner_ops tda9840_tuner_ops = { .s_tuner = tda9840_s_tuner, .g_tuner = tda9840_g_tuner, }; static const struct v4l2_subdev_ops tda9840_ops = { .core = &tda9840_core_ops, .tuner = &tda9840_tuner_ops, }; /* ----------------------------------------------------------------------- */ static int tda9840_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct v4l2_subdev *sd; /* let's see whether this adapter can support what we need */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tda9840_ops); /* set initial values for level & stereo - adjustment, mode */ tda9840_write(sd, LEVEL_ADJUST, 0); tda9840_write(sd, STEREO_ADJUST, 0); tda9840_write(sd, SWITCH, TDA9840_SET_STEREO); return 0; } static int tda9840_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(sd); return 0; } static const struct i2c_device_id tda9840_id[] = { { "tda9840", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tda9840_id); static struct 
i2c_driver tda9840_driver = { .driver = { .owner = THIS_MODULE, .name = "tda9840", }, .probe = tda9840_probe, .remove = tda9840_remove, .id_table = tda9840_id, }; static __init int init_tda9840(void) { return i2c_add_driver(&tda9840_driver); } static __exit void exit_tda9840(void) { i2c_del_driver(&tda9840_driver); } module_init(init_tda9840); module_exit(exit_tda9840);
gpl-2.0
eyosen/kernel_htc_endeavoru
arch/powerpc/sysdev/qe_lib/ucc.c
3782
5577
/* * arch/powerpc/sysdev/qe_lib/ucc.c * * QE UCC API Set - UCC specific routines implementations. * * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved. * * Authors: Shlomi Gridish <gridish@freescale.com> * Li Yang <leoli@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/module.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/immap_qe.h> #include <asm/qe.h> #include <asm/ucc.h> int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) { unsigned long flags; if (ucc_num > UCC_MAX_NUM - 1) return -EINVAL; spin_lock_irqsave(&cmxgcr_lock, flags); clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); spin_unlock_irqrestore(&cmxgcr_lock, flags); return 0; } EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng); /* Configure the UCC to either Slow or Fast. * * A given UCC can be figured to support either "slow" devices (e.g. UART) * or "fast" devices (e.g. Ethernet). * * 'ucc_num' is the UCC number, from 0 - 7. * * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit * must always be set to 1. */ int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed) { u8 __iomem *guemr; /* The GUEMR register is at the same location for both slow and fast devices, so we just use uccX.slow.guemr. 
*/ switch (ucc_num) { case 0: guemr = &qe_immr->ucc1.slow.guemr; break; case 1: guemr = &qe_immr->ucc2.slow.guemr; break; case 2: guemr = &qe_immr->ucc3.slow.guemr; break; case 3: guemr = &qe_immr->ucc4.slow.guemr; break; case 4: guemr = &qe_immr->ucc5.slow.guemr; break; case 5: guemr = &qe_immr->ucc6.slow.guemr; break; case 6: guemr = &qe_immr->ucc7.slow.guemr; break; case 7: guemr = &qe_immr->ucc8.slow.guemr; break; default: return -EINVAL; } clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, UCC_GUEMR_SET_RESERVED3 | speed); return 0; } static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr, unsigned int *reg_num, unsigned int *shift) { unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3); *reg_num = cmx + 1; *cmxucr = &qe_immr->qmx.cmxucr[cmx]; *shift = 16 - 8 * (ucc_num & 2); } int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) { __be32 __iomem *cmxucr; unsigned int reg_num; unsigned int shift; /* check if the UCC number is in range. */ if (ucc_num > UCC_MAX_NUM - 1) return -EINVAL; get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift); if (set) setbits32(cmxucr, mask << shift); else clrbits32(cmxucr, mask << shift); return 0; } int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, enum comm_dir mode) { __be32 __iomem *cmxucr; unsigned int reg_num; unsigned int shift; u32 clock_bits = 0; /* check if the UCC number is in range. 
*/ if (ucc_num > UCC_MAX_NUM - 1) return -EINVAL; /* The communications direction must be RX or TX */ if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) return -EINVAL; get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift); switch (reg_num) { case 1: switch (clock) { case QE_BRG1: clock_bits = 1; break; case QE_BRG2: clock_bits = 2; break; case QE_BRG7: clock_bits = 3; break; case QE_BRG8: clock_bits = 4; break; case QE_CLK9: clock_bits = 5; break; case QE_CLK10: clock_bits = 6; break; case QE_CLK11: clock_bits = 7; break; case QE_CLK12: clock_bits = 8; break; case QE_CLK15: clock_bits = 9; break; case QE_CLK16: clock_bits = 10; break; default: break; } break; case 2: switch (clock) { case QE_BRG5: clock_bits = 1; break; case QE_BRG6: clock_bits = 2; break; case QE_BRG7: clock_bits = 3; break; case QE_BRG8: clock_bits = 4; break; case QE_CLK13: clock_bits = 5; break; case QE_CLK14: clock_bits = 6; break; case QE_CLK19: clock_bits = 7; break; case QE_CLK20: clock_bits = 8; break; case QE_CLK15: clock_bits = 9; break; case QE_CLK16: clock_bits = 10; break; default: break; } break; case 3: switch (clock) { case QE_BRG9: clock_bits = 1; break; case QE_BRG10: clock_bits = 2; break; case QE_BRG15: clock_bits = 3; break; case QE_BRG16: clock_bits = 4; break; case QE_CLK3: clock_bits = 5; break; case QE_CLK4: clock_bits = 6; break; case QE_CLK17: clock_bits = 7; break; case QE_CLK18: clock_bits = 8; break; case QE_CLK7: clock_bits = 9; break; case QE_CLK8: clock_bits = 10; break; case QE_CLK16: clock_bits = 11; break; default: break; } break; case 4: switch (clock) { case QE_BRG13: clock_bits = 1; break; case QE_BRG14: clock_bits = 2; break; case QE_BRG15: clock_bits = 3; break; case QE_BRG16: clock_bits = 4; break; case QE_CLK5: clock_bits = 5; break; case QE_CLK6: clock_bits = 6; break; case QE_CLK21: clock_bits = 7; break; case QE_CLK22: clock_bits = 8; break; case QE_CLK7: clock_bits = 9; break; case QE_CLK8: clock_bits = 10; break; case QE_CLK16: clock_bits = 11; 
break; default: break; } break; default: break; } /* Check for invalid combination of clock and UCC number */ if (!clock_bits) return -ENOENT; if (mode == COMM_DIR_RX) shift += 4; clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, clock_bits << shift); return 0; }
gpl-2.0
penreturns/AK-OnePone
arch/powerpc/mm/stab.c
4550
7645
/* * PowerPC64 Segment Translation Support. * * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com * Copyright (c) 2001 Dave Engebretsen * * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/memblock.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/mmu_context.h> #include <asm/paca.h> #include <asm/cputable.h> #include <asm/prom.h> #include <asm/abs_addr.h> struct stab_entry { unsigned long esid_data; unsigned long vsid_data; }; #define NR_STAB_CACHE_ENTRIES 8 static DEFINE_PER_CPU(long, stab_cache_ptr); static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache); /* * Create a segment table entry for the given esid/vsid pair. */ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid) { unsigned long esid_data, vsid_data; unsigned long entry, group, old_esid, castout_entry, i; unsigned int global_entry; struct stab_entry *ste, *castout_ste; unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET; vsid_data = vsid << STE_VSID_SHIFT; esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V; if (! kernel_segment) esid_data |= STE_ESID_KS; /* Search the primary group first. */ global_entry = (esid & 0x1f) << 3; ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7)); /* Find an empty entry, if one exists. */ for (group = 0; group < 2; group++) { for (entry = 0; entry < 8; entry++, ste++) { if (!(ste->esid_data & STE_ESID_V)) { ste->vsid_data = vsid_data; eieio(); ste->esid_data = esid_data; return (global_entry | entry); } } /* Now search the secondary group. */ global_entry = ((~esid) & 0x1f) << 3; ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7)); } /* * Could not find empty entry, pick one with a round robin selection. 
* Search all entries in the two groups. */ castout_entry = get_paca()->stab_rr; for (i = 0; i < 16; i++) { if (castout_entry < 8) { global_entry = (esid & 0x1f) << 3; ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7)); castout_ste = ste + castout_entry; } else { global_entry = ((~esid) & 0x1f) << 3; ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7)); castout_ste = ste + (castout_entry - 8); } /* Dont cast out the first kernel segment */ if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET) break; castout_entry = (castout_entry + 1) & 0xf; } get_paca()->stab_rr = (castout_entry + 1) & 0xf; /* Modify the old entry to the new value. */ /* Force previous translations to complete. DRENG */ asm volatile("isync" : : : "memory"); old_esid = castout_ste->esid_data >> SID_SHIFT; castout_ste->esid_data = 0; /* Invalidate old entry */ asm volatile("sync" : : : "memory"); /* Order update */ castout_ste->vsid_data = vsid_data; eieio(); /* Order update */ castout_ste->esid_data = esid_data; asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT)); /* Ensure completion of slbie */ asm volatile("sync" : : : "memory"); return (global_entry | (castout_entry & 0x7)); } /* * Allocate a segment table entry for the given ea and mm */ static int __ste_allocate(unsigned long ea, struct mm_struct *mm) { unsigned long vsid; unsigned char stab_entry; unsigned long offset; /* Kernel or user address? */ if (is_kernel_addr(ea)) { vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); } else { if ((ea >= TASK_SIZE_USER64) || (! 
mm)) return 1; vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M); } stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid); if (!is_kernel_addr(ea)) { offset = __get_cpu_var(stab_cache_ptr); if (offset < NR_STAB_CACHE_ENTRIES) __get_cpu_var(stab_cache[offset++]) = stab_entry; else offset = NR_STAB_CACHE_ENTRIES+1; __get_cpu_var(stab_cache_ptr) = offset; /* Order update */ asm volatile("sync":::"memory"); } return 0; } int ste_allocate(unsigned long ea) { return __ste_allocate(ea, current->mm); } /* * Do the segment table work for a context switch: flush all user * entries from the table, then preload some probably useful entries * for the new task */ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) { struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; struct stab_entry *ste; unsigned long offset; unsigned long pc = KSTK_EIP(tsk); unsigned long stack = KSTK_ESP(tsk); unsigned long unmapped_base; /* Force previous translations to complete. DRENG */ asm volatile("isync" : : : "memory"); /* * We need interrupts hard-disabled here, not just soft-disabled, * so that a PMU interrupt can't occur, which might try to access * user memory (to get a stack trace) and possible cause an STAB miss * which would update the stab_cache/stab_cache_ptr per-cpu variables. */ hard_irq_disable(); offset = __get_cpu_var(stab_cache_ptr); if (offset <= NR_STAB_CACHE_ENTRIES) { int i; for (i = 0; i < offset; i++) { ste = stab + __get_cpu_var(stab_cache[i]); ste->esid_data = 0; /* invalidate entry */ } } else { unsigned long entry; /* Invalidate all entries. */ ste = stab; /* Never flush the first entry. 
*/ ste += 1; for (entry = 1; entry < (HW_PAGE_SIZE / sizeof(struct stab_entry)); entry++, ste++) { unsigned long ea; ea = ste->esid_data & ESID_MASK; if (!is_kernel_addr(ea)) { ste->esid_data = 0; } } } asm volatile("sync; slbia; sync":::"memory"); __get_cpu_var(stab_cache_ptr) = 0; /* Now preload some entries for the new task */ if (test_tsk_thread_flag(tsk, TIF_32BIT)) unmapped_base = TASK_UNMAPPED_BASE_USER32; else unmapped_base = TASK_UNMAPPED_BASE_USER64; __ste_allocate(pc, mm); if (GET_ESID(pc) == GET_ESID(stack)) return; __ste_allocate(stack, mm); if ((GET_ESID(pc) == GET_ESID(unmapped_base)) || (GET_ESID(stack) == GET_ESID(unmapped_base))) return; __ste_allocate(unmapped_base, mm); /* Order update */ asm volatile("sync" : : : "memory"); } /* * Allocate segment tables for secondary CPUs. These must all go in * the first (bolted) segment, so that do_stab_bolted won't get a * recursive segment miss on the segment table itself. */ void __init stabs_alloc(void) { int cpu; if (mmu_has_feature(MMU_FTR_SLB)) return; for_each_possible_cpu(cpu) { unsigned long newstab; if (cpu == 0) continue; /* stab for CPU 0 is statically allocated */ newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE, 1<<SID_SHIFT); newstab = (unsigned long)__va(newstab); memset((void *)newstab, 0, HW_PAGE_SIZE); paca[cpu].stab_addr = newstab; paca[cpu].stab_real = virt_to_abs(newstab); printk(KERN_INFO "Segment table for CPU %d at 0x%llx " "virtual, 0x%llx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real); } } /* * Build an entry for the base kernel segment and put it into * the segment table or SLB. All other segment table or SLB * entries are faulted in. 
*/ void stab_initialize(unsigned long stab) { unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M); unsigned long stabreal; asm volatile("isync; slbia; isync":::"memory"); make_ste(stab, GET_ESID(PAGE_OFFSET), vsid); /* Order update */ asm volatile("sync":::"memory"); /* Set ASR */ stabreal = get_paca()->stab_real | 0x1ul; mtspr(SPRN_ASR, stabreal); }
gpl-2.0
finnq/android_kernel_lge_g3
drivers/media/radio/radio-keene.c
4806
12036
/* * Copyright (c) 2012 Hans Verkuil <hverkuil@xs4all.nl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* kernel includes */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <linux/usb.h> #include <linux/version.h> #include <linux/mutex.h> /* driver and module definitions */ MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>"); MODULE_DESCRIPTION("Keene FM Transmitter driver"); MODULE_LICENSE("GPL"); /* Actually, it advertises itself as a Logitech */ #define USB_KEENE_VENDOR 0x046d #define USB_KEENE_PRODUCT 0x0a0e /* Probably USB_TIMEOUT should be modified in module parameter */ #define BUFFER_LENGTH 8 #define USB_TIMEOUT 500 /* Frequency limits in MHz */ #define FREQ_MIN 76U #define FREQ_MAX 108U #define FREQ_MUL 16000U /* USB Device ID List */ static struct usb_device_id usb_keene_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_KEENE_VENDOR, USB_KEENE_PRODUCT, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_keene_device_table); struct keene_device { struct usb_device *usbdev; struct usb_interface *intf; struct video_device vdev; struct v4l2_device 
v4l2_dev; struct v4l2_ctrl_handler hdl; struct mutex lock; u8 *buffer; unsigned curfreq; u8 tx; u8 pa; bool stereo; bool muted; bool preemph_75_us; }; static inline struct keene_device *to_keene_dev(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct keene_device, v4l2_dev); } /* Set frequency (if non-0), PA, mute and turn on/off the FM transmitter. */ static int keene_cmd_main(struct keene_device *radio, unsigned freq, bool play) { unsigned short freq_send = freq ? (freq - 76 * 16000) / 800 : 0; int ret; radio->buffer[0] = 0x00; radio->buffer[1] = 0x50; radio->buffer[2] = (freq_send >> 8) & 0xff; radio->buffer[3] = freq_send & 0xff; radio->buffer[4] = radio->pa; /* If bit 4 is set, then tune to the frequency. If bit 3 is set, then unmute; if bit 2 is set, then mute. If bit 1 is set, then enter idle mode; if bit 0 is set, then enter transit mode. */ radio->buffer[5] = (radio->muted ? 4 : 8) | (play ? 1 : 2) | (freq ? 0x10 : 0); radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), 9, 0x21, 0x200, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(&radio->vdev.dev, "%s failed (%d)\n", __func__, ret); return ret; } if (freq) radio->curfreq = freq; return 0; } /* Set TX, stereo and preemphasis mode (50 us vs 75 us). */ static int keene_cmd_set(struct keene_device *radio) { int ret; radio->buffer[0] = 0x00; radio->buffer[1] = 0x51; radio->buffer[2] = radio->tx; /* If bit 0 is set, then transmit mono, otherwise stereo. If bit 2 is set, then enable 75 us preemphasis, otherwise it is 50 us. */ radio->buffer[3] = (!radio->stereo) | (radio->preemph_75_us ? 
4 : 0); radio->buffer[4] = 0x00; radio->buffer[5] = 0x00; radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), 9, 0x21, 0x200, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(&radio->vdev.dev, "%s failed (%d)\n", __func__, ret); return ret; } return 0; } /* Handle unplugging the device. * We call video_unregister_device in any case. * The last function called in this procedure is * usb_keene_device_release. */ static void usb_keene_disconnect(struct usb_interface *intf) { struct keene_device *radio = to_keene_dev(usb_get_intfdata(intf)); v4l2_device_get(&radio->v4l2_dev); mutex_lock(&radio->lock); usb_set_intfdata(intf, NULL); video_unregister_device(&radio->vdev); v4l2_device_disconnect(&radio->v4l2_dev); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { struct keene_device *radio = video_drvdata(file); strlcpy(v->driver, "radio-keene", sizeof(v->driver)); strlcpy(v->card, "Keene FM Transmitter", sizeof(v->card)); usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); v->device_caps = V4L2_CAP_RADIO | V4L2_CAP_MODULATOR; v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *v) { struct keene_device *radio = video_drvdata(file); if (v->index > 0) return -EINVAL; strlcpy(v->name, "FM", sizeof(v->name)); v->rangelow = FREQ_MIN * FREQ_MUL; v->rangehigh = FREQ_MAX * FREQ_MUL; v->txsubchans = radio->stereo ? 
V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO; v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; return 0; } static int vidioc_s_modulator(struct file *file, void *priv, struct v4l2_modulator *v) { struct keene_device *radio = video_drvdata(file); if (v->index > 0) return -EINVAL; radio->stereo = (v->txsubchans == V4L2_TUNER_SUB_STEREO); return keene_cmd_set(radio); } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct keene_device *radio = video_drvdata(file); if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; f->frequency = clamp(f->frequency, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL); return keene_cmd_main(radio, f->frequency, true); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct keene_device *radio = video_drvdata(file); if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = radio->curfreq; return 0; } static int keene_s_ctrl(struct v4l2_ctrl *ctrl) { static const u8 db2tx[] = { /* -15, -12, -9, -6, -3, 0 dB */ 0x03, 0x13, 0x02, 0x12, 0x22, 0x32, /* 3, 6, 9, 12, 15, 18 dB */ 0x21, 0x31, 0x20, 0x30, 0x40, 0x50 }; struct keene_device *radio = container_of(ctrl->handler, struct keene_device, hdl); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: radio->muted = ctrl->val; return keene_cmd_main(radio, 0, true); case V4L2_CID_TUNE_POWER_LEVEL: /* To go from dBuV to the register value we apply the following formula: */ radio->pa = (ctrl->val - 71) * 100 / 62; return keene_cmd_main(radio, 0, true); case V4L2_CID_TUNE_PREEMPHASIS: radio->preemph_75_us = ctrl->val == V4L2_PREEMPHASIS_75_uS; return keene_cmd_set(radio); case V4L2_CID_AUDIO_COMPRESSION_GAIN: radio->tx = db2tx[(ctrl->val - ctrl->minimum) / ctrl->step]; return keene_cmd_set(radio); } return -EINVAL; } static int vidioc_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_CTRL: return v4l2_event_subscribe(fh, sub, 
0); default: return -EINVAL; } } /* File system interface */ static const struct v4l2_file_operations usb_keene_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = v4l2_fh_release, .poll = v4l2_ctrl_poll, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ctrl_ops keene_ctrl_ops = { .s_ctrl = keene_s_ctrl, }; static const struct v4l2_ioctl_ops usb_keene_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_modulator = vidioc_g_modulator, .vidioc_s_modulator = vidioc_s_modulator, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = vidioc_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static void usb_keene_video_device_release(struct v4l2_device *v4l2_dev) { struct keene_device *radio = to_keene_dev(v4l2_dev); /* free rest memory */ v4l2_ctrl_handler_free(&radio->hdl); kfree(radio->buffer); kfree(radio); } /* check if the device is present and register with v4l and usb if it is */ static int usb_keene_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct keene_device *radio; struct v4l2_ctrl_handler *hdl; int retval = 0; /* * The Keene FM transmitter USB device has the same USB ID as * the Logitech AudioHub Speaker, but it should ignore the hid. * Check if the name is that of the Keene device. * If not, then someone connected the AudioHub and we shouldn't * attempt to handle this driver. * For reference: the product name of the AudioHub is * "AudioHub Speaker". 
*/ if (dev->product && strcmp(dev->product, "B-LINK USB Audio ")) return -ENODEV; radio = kzalloc(sizeof(struct keene_device), GFP_KERNEL); if (radio) radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL); if (!radio || !radio->buffer) { dev_err(&intf->dev, "kmalloc for keene_device failed\n"); kfree(radio); retval = -ENOMEM; goto err; } hdl = &radio->hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); v4l2_ctrl_new_std_menu(hdl, &keene_ctrl_ops, V4L2_CID_TUNE_PREEMPHASIS, V4L2_PREEMPHASIS_75_uS, 1, V4L2_PREEMPHASIS_50_uS); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_TUNE_POWER_LEVEL, 84, 118, 1, 118); v4l2_ctrl_new_std(hdl, &keene_ctrl_ops, V4L2_CID_AUDIO_COMPRESSION_GAIN, -15, 18, 3, 0); radio->pa = 118; radio->tx = 0x32; radio->stereo = true; radio->curfreq = 95.16 * FREQ_MUL; if (hdl->error) { retval = hdl->error; v4l2_ctrl_handler_free(hdl); goto err_v4l2; } retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); goto err_v4l2; } mutex_init(&radio->lock); radio->v4l2_dev.ctrl_handler = hdl; radio->v4l2_dev.release = usb_keene_video_device_release; strlcpy(radio->vdev.name, radio->v4l2_dev.name, sizeof(radio->vdev.name)); radio->vdev.v4l2_dev = &radio->v4l2_dev; radio->vdev.fops = &usb_keene_fops; radio->vdev.ioctl_ops = &usb_keene_ioctl_ops; radio->vdev.lock = &radio->lock; radio->vdev.release = video_device_release_empty; radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; usb_set_intfdata(intf, &radio->v4l2_dev); video_set_drvdata(&radio->vdev, radio); set_bit(V4L2_FL_USE_FH_PRIO, &radio->vdev.flags); retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1); if (retval < 0) { dev_err(&intf->dev, "could not register video device\n"); goto err_vdev; } v4l2_ctrl_handler_setup(hdl); dev_info(&intf->dev, "V4L2 device registered as %s\n", video_device_node_name(&radio->vdev)); return 0; err_vdev: 
v4l2_device_unregister(&radio->v4l2_dev); err_v4l2: kfree(radio->buffer); kfree(radio); err: return retval; } /* USB subsystem interface */ static struct usb_driver usb_keene_driver = { .name = "radio-keene", .probe = usb_keene_probe, .disconnect = usb_keene_disconnect, .id_table = usb_keene_device_table, }; static int __init keene_init(void) { int retval = usb_register(&usb_keene_driver); if (retval) pr_err(KBUILD_MODNAME ": usb_register failed. Error number %d\n", retval); return retval; } static void __exit keene_exit(void) { usb_deregister(&usb_keene_driver); } module_init(keene_init); module_exit(keene_exit);
gpl-2.0
rmbq/android_kernel_lge_hammerhead
drivers/net/ethernet/marvell/pxa168_eth.c
4806
42635
/* * PXA168 ethernet driver. * Most of the code is derived from mv643xx ethernet driver. * * Copyright (C) 2010 Marvell International Ltd. * Sachin Sanap <ssanap@marvell.com> * Zhangfei Gao <zgao6@marvell.com> * Philip Rakity <prakity@marvell.com> * Mark Brown <markb@marvell.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/etherdevice.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/clk.h> #include <linux/phy.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/types.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> #include <linux/pxa168_eth.h> #define DRIVER_NAME "pxa168-eth" #define DRIVER_VERSION "0.3" /* * Registers */ #define PHY_ADDRESS 0x0000 #define SMI 0x0010 #define PORT_CONFIG 0x0400 #define PORT_CONFIG_EXT 0x0408 #define PORT_COMMAND 0x0410 #define PORT_STATUS 0x0418 #define HTPR 0x0428 #define SDMA_CONFIG 0x0440 #define SDMA_CMD 0x0448 #define INT_CAUSE 0x0450 #define INT_W_CLEAR 0x0454 #define INT_MASK 0x0458 #define ETH_F_RX_DESC_0 0x0480 #define 
ETH_C_RX_DESC_0 0x04A0 #define ETH_C_TX_DESC_1 0x04E4 /* smi register */ #define SMI_BUSY (1 << 28) /* 0 - Write, 1 - Read */ #define SMI_R_VALID (1 << 27) /* 0 - Write, 1 - Read */ #define SMI_OP_W (0 << 26) /* Write operation */ #define SMI_OP_R (1 << 26) /* Read operation */ #define PHY_WAIT_ITERATIONS 10 #define PXA168_ETH_PHY_ADDR_DEFAULT 0 /* RX & TX descriptor command */ #define BUF_OWNED_BY_DMA (1 << 31) /* RX descriptor status */ #define RX_EN_INT (1 << 23) #define RX_FIRST_DESC (1 << 17) #define RX_LAST_DESC (1 << 16) #define RX_ERROR (1 << 15) /* TX descriptor command */ #define TX_EN_INT (1 << 23) #define TX_GEN_CRC (1 << 22) #define TX_ZERO_PADDING (1 << 18) #define TX_FIRST_DESC (1 << 17) #define TX_LAST_DESC (1 << 16) #define TX_ERROR (1 << 15) /* SDMA_CMD */ #define SDMA_CMD_AT (1 << 31) #define SDMA_CMD_TXDL (1 << 24) #define SDMA_CMD_TXDH (1 << 23) #define SDMA_CMD_AR (1 << 15) #define SDMA_CMD_ERD (1 << 7) /* Bit definitions of the Port Config Reg */ #define PCR_HS (1 << 12) #define PCR_EN (1 << 7) #define PCR_PM (1 << 0) /* Bit definitions of the Port Config Extend Reg */ #define PCXR_2BSM (1 << 28) #define PCXR_DSCP_EN (1 << 21) #define PCXR_MFL_1518 (0 << 14) #define PCXR_MFL_1536 (1 << 14) #define PCXR_MFL_2048 (2 << 14) #define PCXR_MFL_64K (3 << 14) #define PCXR_FLP (1 << 11) #define PCXR_PRIO_TX_OFF 3 #define PCXR_TX_HIGH_PRI (7 << PCXR_PRIO_TX_OFF) /* Bit definitions of the SDMA Config Reg */ #define SDCR_BSZ_OFF 12 #define SDCR_BSZ8 (3 << SDCR_BSZ_OFF) #define SDCR_BSZ4 (2 << SDCR_BSZ_OFF) #define SDCR_BSZ2 (1 << SDCR_BSZ_OFF) #define SDCR_BSZ1 (0 << SDCR_BSZ_OFF) #define SDCR_BLMR (1 << 6) #define SDCR_BLMT (1 << 7) #define SDCR_RIFB (1 << 9) #define SDCR_RC_OFF 2 #define SDCR_RC_MAX_RETRANS (0xf << SDCR_RC_OFF) /* * Bit definitions of the Interrupt Cause Reg * and Interrupt MASK Reg is the same */ #define ICR_RXBUF (1 << 0) #define ICR_TXBUF_H (1 << 2) #define ICR_TXBUF_L (1 << 3) #define ICR_TXEND_H (1 << 6) #define ICR_TXEND_L (1 << 
7) #define ICR_RXERR (1 << 8) #define ICR_TXERR_H (1 << 10) #define ICR_TXERR_L (1 << 11) #define ICR_TX_UDR (1 << 13) #define ICR_MII_CH (1 << 28) #define ALL_INTS (ICR_TXBUF_H | ICR_TXBUF_L | ICR_TX_UDR |\ ICR_TXERR_H | ICR_TXERR_L |\ ICR_TXEND_H | ICR_TXEND_L |\ ICR_RXBUF | ICR_RXERR | ICR_MII_CH) #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ #define NUM_RX_DESCS 64 #define NUM_TX_DESCS 64 #define HASH_ADD 0 #define HASH_DELETE 1 #define HASH_ADDR_TABLE_SIZE 0x4000 /* 16K (1/2K address - PCR_HS == 1) */ #define HOP_NUMBER 12 /* Bit definitions for Port status */ #define PORT_SPEED_100 (1 << 0) #define FULL_DUPLEX (1 << 1) #define FLOW_CONTROL_ENABLED (1 << 2) #define LINK_UP (1 << 3) /* Bit definitions for work to be done */ #define WORK_LINK (1 << 0) #define WORK_TX_DONE (1 << 1) /* * Misc definitions. */ #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) struct rx_desc { u32 cmd_sts; /* Descriptor command status */ u16 byte_cnt; /* Descriptor buffer byte count */ u16 buf_size; /* Buffer size */ u32 buf_ptr; /* Descriptor buffer pointer */ u32 next_desc_ptr; /* Next descriptor pointer */ }; struct tx_desc { u32 cmd_sts; /* Command/status field */ u16 reserved; u16 byte_cnt; /* buffer byte count */ u32 buf_ptr; /* pointer to buffer for this descriptor */ u32 next_desc_ptr; /* Pointer to next descriptor */ }; struct pxa168_eth_private { int port_num; /* User Ethernet port number */ int rx_resource_err; /* Rx ring resource error flag */ /* Next available and first returning Rx resource */ int rx_curr_desc_q, rx_used_desc_q; /* Next available and first returning Tx resource */ int tx_curr_desc_q, tx_used_desc_q; struct rx_desc *p_rx_desc_area; dma_addr_t rx_desc_dma; int rx_desc_area_size; struct sk_buff **rx_skb; struct tx_desc *p_tx_desc_area; dma_addr_t tx_desc_dma; int tx_desc_area_size; struct sk_buff **tx_skb; struct work_struct tx_timeout_task; struct net_device *dev; struct napi_struct napi; u8 work_todo; int skb_size; /* Size of Tx 
Ring per queue */ int tx_ring_size; /* Number of tx descriptors in use */ int tx_desc_count; /* Size of Rx Ring per queue */ int rx_ring_size; /* Number of rx descriptors in use */ int rx_desc_count; /* * Used in case RX Ring is empty, which can occur when * system does not have resources (skb's) */ struct timer_list timeout; struct mii_bus *smi_bus; struct phy_device *phy; /* clock */ struct clk *clk; struct pxa168_eth_platform_data *pd; /* * Ethernet controller base address. */ void __iomem *base; /* Pointer to the hardware address filter table */ void *htpr; dma_addr_t htpr_dma; }; struct addr_table_entry { __le32 lo; __le32 hi; }; /* Bit fields of a Hash Table Entry */ enum hash_table_entry { HASH_ENTRY_VALID = 1, SKIP = 2, HASH_ENTRY_RECEIVE_DISCARD = 4, HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 }; static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); static int pxa168_init_hw(struct pxa168_eth_private *pep); static void eth_port_reset(struct net_device *dev); static void eth_port_start(struct net_device *dev); static int pxa168_eth_open(struct net_device *dev); static int pxa168_eth_stop(struct net_device *dev); static int ethernet_phy_setup(struct net_device *dev); static inline u32 rdl(struct pxa168_eth_private *pep, int offset) { return readl(pep->base + offset); } static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) { writel(data, pep->base + offset); } static void abort_dma(struct pxa168_eth_private *pep) { int delay; int max_retries = 40; do { wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT); udelay(100); delay = 10; while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT)) && delay-- > 0) { udelay(10); } } while (max_retries-- > 0 && delay <= 0); if (max_retries <= 0) printk(KERN_ERR "%s : DMA Stuck\n", __func__); } static int ethernet_phy_get(struct pxa168_eth_private *pep) { unsigned int reg_data; reg_data = rdl(pep, PHY_ADDRESS); 
return (reg_data >> (5 * pep->port_num)) & 0x1f; } static void ethernet_phy_set_addr(struct pxa168_eth_private *pep, int phy_addr) { u32 reg_data; int addr_shift = 5 * pep->port_num; reg_data = rdl(pep, PHY_ADDRESS); reg_data &= ~(0x1f << addr_shift); reg_data |= (phy_addr & 0x1f) << addr_shift; wrl(pep, PHY_ADDRESS, reg_data); } static void ethernet_phy_reset(struct pxa168_eth_private *pep) { int data; data = phy_read(pep->phy, MII_BMCR); if (data < 0) return; data |= BMCR_RESET; if (phy_write(pep->phy, MII_BMCR, data) < 0) return; do { data = phy_read(pep->phy, MII_BMCR); } while (data >= 0 && data & BMCR_RESET); } static void rxq_refill(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct sk_buff *skb; struct rx_desc *p_used_rx_desc; int used_rx_desc; while (pep->rx_desc_count < pep->rx_ring_size) { int size; skb = netdev_alloc_skb(dev, pep->skb_size); if (!skb) break; if (SKB_DMA_REALIGN) skb_reserve(skb, SKB_DMA_REALIGN); pep->rx_desc_count++; /* Get 'used' Rx descriptor */ used_rx_desc = pep->rx_used_desc_q; p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; size = skb->end - skb->data; p_used_rx_desc->buf_ptr = dma_map_single(NULL, skb->data, size, DMA_FROM_DEVICE); p_used_rx_desc->buf_size = size; pep->rx_skb[used_rx_desc] = skb; /* Return the descriptor to DMA ownership */ wmb(); p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; wmb(); /* Move the used descriptor pointer to the next descriptor */ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; /* Any Rx return cancels the Rx resource error status */ pep->rx_resource_err = 0; skb_reserve(skb, ETH_HW_IP_ALIGN); } /* * If RX ring is empty of SKB, set a timer to try allocating * again at a later time. 
*/ if (pep->rx_desc_count == 0) { pep->timeout.expires = jiffies + (HZ / 10); add_timer(&pep->timeout); } } static inline void rxq_refill_timer_wrapper(unsigned long data) { struct pxa168_eth_private *pep = (void *)data; napi_schedule(&pep->napi); } static inline u8 flip_8_bits(u8 x) { return (((x) & 0x01) << 3) | (((x) & 0x02) << 1) | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3) | (((x) & 0x10) << 3) | (((x) & 0x20) << 1) | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3); } static void nibble_swap_every_byte(unsigned char *mac_addr) { int i; for (i = 0; i < ETH_ALEN; i++) { mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) | ((mac_addr[i] & 0xf0) >> 4); } } static void inverse_every_nibble(unsigned char *mac_addr) { int i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = flip_8_bits(mac_addr[i]); } /* * ---------------------------------------------------------------------------- * This function will calculate the hash function of the address. * Inputs * mac_addr_orig - MAC address. * Outputs * return the calculated entry. */ static u32 hash_function(unsigned char *mac_addr_orig) { u32 hash_result; u32 addr0; u32 addr1; u32 addr2; u32 addr3; unsigned char mac_addr[ETH_ALEN]; /* Make a copy of MAC address since we are going to performe bit * operations on it */ memcpy(mac_addr, mac_addr_orig, ETH_ALEN); nibble_swap_every_byte(mac_addr); inverse_every_nibble(mac_addr); addr0 = (mac_addr[5] >> 2) & 0x3f; addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2); addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1; addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8); hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3); hash_result = hash_result & 0x07ff; return hash_result; } /* * ---------------------------------------------------------------------------- * This function will add/del an entry to the address table. * Inputs * pep - ETHERNET . * mac_addr - MAC address. 
* skip - if 1, skip this address.Used in case of deleting an entry which is a * part of chain in the hash table.We can't just delete the entry since * that will break the chain.We need to defragment the tables time to * time. * rd - 0 Discard packet upon match. * - 1 Receive packet upon match. * Outputs * address table entry is added/deleted. * 0 if success. * -ENOSPC if table full */ static int add_del_hash_entry(struct pxa168_eth_private *pep, unsigned char *mac_addr, u32 rd, u32 skip, int del) { struct addr_table_entry *entry, *start; u32 new_high; u32 new_low; u32 i; new_low = (((mac_addr[1] >> 4) & 0xf) << 15) | (((mac_addr[1] >> 0) & 0xf) << 11) | (((mac_addr[0] >> 4) & 0xf) << 7) | (((mac_addr[0] >> 0) & 0xf) << 3) | (((mac_addr[3] >> 4) & 0x1) << 31) | (((mac_addr[3] >> 0) & 0xf) << 27) | (((mac_addr[2] >> 4) & 0xf) << 23) | (((mac_addr[2] >> 0) & 0xf) << 19) | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT) | HASH_ENTRY_VALID; new_high = (((mac_addr[5] >> 4) & 0xf) << 15) | (((mac_addr[5] >> 0) & 0xf) << 11) | (((mac_addr[4] >> 4) & 0xf) << 7) | (((mac_addr[4] >> 0) & 0xf) << 3) | (((mac_addr[3] >> 5) & 0x7) << 0); /* * Pick the appropriate table, start scanning for free/reusable * entries at the index obtained by hashing the specified MAC address */ start = pep->htpr; entry = start + hash_function(mac_addr); for (i = 0; i < HOP_NUMBER; i++) { if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) { break; } else { /* if same address put in same position */ if (((le32_to_cpu(entry->lo) & 0xfffffff8) == (new_low & 0xfffffff8)) && (le32_to_cpu(entry->hi) == new_high)) { break; } } if (entry == start + 0x7ff) entry = start; else entry++; } if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) && (le32_to_cpu(entry->hi) != new_high) && del) return 0; if (i == HOP_NUMBER) { if (!del) { printk(KERN_INFO "%s: table section is full, need to " "move to 16kB implementation?\n", __FILE__); return -ENOSPC; } else return 0; } /* * Update the selected 
entry */ if (del) { entry->hi = 0; entry->lo = 0; } else { entry->hi = cpu_to_le32(new_high); entry->lo = cpu_to_le32(new_low); } return 0; } /* * ---------------------------------------------------------------------------- * Create an addressTable entry from MAC address info * found in the specifed net_device struct * * Input : pointer to ethernet interface network device structure * Output : N/A */ static void update_hash_table_mac_address(struct pxa168_eth_private *pep, unsigned char *oaddr, unsigned char *addr) { /* Delete old entry */ if (oaddr) add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE); /* Add new entry */ add_del_hash_entry(pep, addr, 1, 0, HASH_ADD); } static int init_hash_table(struct pxa168_eth_private *pep) { /* * Hardware expects CPU to build a hash table based on a predefined * hash function and populate it based on hardware address. The * location of the hash table is identified by 32-bit pointer stored * in HTPR internal register. Two possible sizes exists for the hash * table 8kB (256kB of DRAM required (4 x 64 kB banks)) and 1/2kB * (16kB of DRAM required (4 x 4 kB banks)).We currently only support * 1/2kB. */ /* TODO: Add support for 8kB hash table and alternative hash * function.Driver can dynamically switch to them if the 1/2kB hash * table is full. */ if (pep->htpr == NULL) { pep->htpr = dma_alloc_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE, &pep->htpr_dma, GFP_KERNEL); if (pep->htpr == NULL) return -ENOMEM; } memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); wrl(pep, HTPR, pep->htpr_dma); return 0; } static void pxa168_eth_set_rx_mode(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct netdev_hw_addr *ha; u32 val; val = rdl(pep, PORT_CONFIG); if (dev->flags & IFF_PROMISC) val |= PCR_PM; else val &= ~PCR_PM; wrl(pep, PORT_CONFIG, val); /* * Remove the old list of MAC address and add dev->addr * and multicast address. 
*/ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE); update_hash_table_mac_address(pep, NULL, dev->dev_addr); netdev_for_each_mc_addr(ha, dev) update_hash_table_mac_address(pep, NULL, ha->addr); } static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = addr; struct pxa168_eth_private *pep = netdev_priv(dev); unsigned char oldMac[ETH_ALEN]; if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; memcpy(oldMac, dev->dev_addr, ETH_ALEN); dev->addr_assign_type &= ~NET_ADDR_RANDOM; memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN); netif_addr_lock_bh(dev); update_hash_table_mac_address(pep, oldMac, dev->dev_addr); netif_addr_unlock_bh(dev); return 0; } static void eth_port_start(struct net_device *dev) { unsigned int val = 0; struct pxa168_eth_private *pep = netdev_priv(dev); int tx_curr_desc, rx_curr_desc; /* Perform PHY reset, if there is a PHY. */ if (pep->phy != NULL) { struct ethtool_cmd cmd; pxa168_get_settings(pep->dev, &cmd); ethernet_phy_reset(pep); pxa168_set_settings(pep->dev, &cmd); } /* Assignment of Tx CTRP of given queue */ tx_curr_desc = pep->tx_curr_desc_q; wrl(pep, ETH_C_TX_DESC_1, (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc))); /* Assignment of Rx CRDP of given queue */ rx_curr_desc = pep->rx_curr_desc_q; wrl(pep, ETH_C_RX_DESC_0, (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); wrl(pep, ETH_F_RX_DESC_0, (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc))); /* Clear all interrupts */ wrl(pep, INT_CAUSE, 0); /* Enable all interrupts for receive, transmit and error. */ wrl(pep, INT_MASK, ALL_INTS); val = rdl(pep, PORT_CONFIG); val |= PCR_EN; wrl(pep, PORT_CONFIG, val); /* Start RX DMA engine */ val = rdl(pep, SDMA_CMD); val |= SDMA_CMD_ERD; wrl(pep, SDMA_CMD, val); } static void eth_port_reset(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); unsigned int val = 0; /* Stop all interrupts for receive, transmit and error. 
*/ wrl(pep, INT_MASK, 0); /* Clear all interrupts */ wrl(pep, INT_CAUSE, 0); /* Stop RX DMA */ val = rdl(pep, SDMA_CMD); val &= ~SDMA_CMD_ERD; /* abort dma command */ /* Abort any transmit and receive operations and put DMA * in idle state. */ abort_dma(pep); /* Disable port */ val = rdl(pep, PORT_CONFIG); val &= ~PCR_EN; wrl(pep, PORT_CONFIG, val); } /* * txq_reclaim - Free the tx desc data for completed descriptors * If force is non-zero, frees uncompleted descriptors as well */ static int txq_reclaim(struct net_device *dev, int force) { struct pxa168_eth_private *pep = netdev_priv(dev); struct tx_desc *desc; u32 cmd_sts; struct sk_buff *skb; int tx_index; dma_addr_t addr; int count; int released = 0; netif_tx_lock(dev); pep->work_todo &= ~WORK_TX_DONE; while (pep->tx_desc_count > 0) { tx_index = pep->tx_used_desc_q; desc = &pep->p_tx_desc_area[tx_index]; cmd_sts = desc->cmd_sts; if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) { if (released > 0) { goto txq_reclaim_end; } else { released = -1; goto txq_reclaim_end; } } pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size; pep->tx_desc_count--; addr = desc->buf_ptr; count = desc->byte_cnt; skb = pep->tx_skb[tx_index]; if (skb) pep->tx_skb[tx_index] = NULL; if (cmd_sts & TX_ERROR) { if (net_ratelimit()) printk(KERN_ERR "%s: Error in TX\n", dev->name); dev->stats.tx_errors++; } dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); if (skb) dev_kfree_skb_irq(skb); released++; } txq_reclaim_end: netif_tx_unlock(dev); return released; } static void pxa168_eth_tx_timeout(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); printk(KERN_INFO "%s: TX timeout desc_count %d\n", dev->name, pep->tx_desc_count); schedule_work(&pep->tx_timeout_task); } static void pxa168_eth_tx_timeout_task(struct work_struct *work) { struct pxa168_eth_private *pep = container_of(work, struct pxa168_eth_private, tx_timeout_task); struct net_device *dev = pep->dev; pxa168_eth_stop(dev); pxa168_eth_open(dev); } static int 
rxq_process(struct net_device *dev, int budget) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int received_packets = 0; struct sk_buff *skb; while (budget-- > 0) { int rx_next_curr_desc, rx_curr_desc, rx_used_desc; struct rx_desc *rx_desc; unsigned int cmd_sts; /* Do not process Rx ring in case of Rx ring resource error */ if (pep->rx_resource_err) break; rx_curr_desc = pep->rx_curr_desc_q; rx_used_desc = pep->rx_used_desc_q; rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; cmd_sts = rx_desc->cmd_sts; rmb(); if (cmd_sts & (BUF_OWNED_BY_DMA)) break; skb = pep->rx_skb[rx_curr_desc]; pep->rx_skb[rx_curr_desc] = NULL; rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size; pep->rx_curr_desc_q = rx_next_curr_desc; /* Rx descriptors exhausted. */ /* Set the Rx ring resource error flag */ if (rx_next_curr_desc == rx_used_desc) pep->rx_resource_err = 1; pep->rx_desc_count--; dma_unmap_single(NULL, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); received_packets++; /* * Update statistics. * Note byte count includes 4 byte CRC count */ stats->rx_packets++; stats->rx_bytes += rx_desc->byte_cnt; /* * In case received a packet without first / last bits on OR * the error summary bit is on, the packets needs to be droped. 
*/ if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) || (cmd_sts & RX_ERROR)) { stats->rx_dropped++; if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) { if (net_ratelimit()) printk(KERN_ERR "%s: Rx pkt on multiple desc\n", dev->name); } if (cmd_sts & RX_ERROR) stats->rx_errors++; dev_kfree_skb_irq(skb); } else { /* * The -4 is for the CRC in the trailer of the * received packet */ skb_put(skb, rx_desc->byte_cnt - 4); skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); } } /* Fill RX ring with skb's */ rxq_refill(dev); return received_packets; } static int pxa168_eth_collect_events(struct pxa168_eth_private *pep, struct net_device *dev) { u32 icr; int ret = 0; icr = rdl(pep, INT_CAUSE); if (icr == 0) return IRQ_NONE; wrl(pep, INT_CAUSE, ~icr); if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) { pep->work_todo |= WORK_TX_DONE; ret = 1; } if (icr & ICR_RXBUF) ret = 1; if (icr & ICR_MII_CH) { pep->work_todo |= WORK_LINK; ret = 1; } return ret; } static void handle_link_event(struct pxa168_eth_private *pep) { struct net_device *dev = pep->dev; u32 port_status; int speed; int duplex; int fc; port_status = rdl(pep, PORT_STATUS); if (!(port_status & LINK_UP)) { if (netif_carrier_ok(dev)) { printk(KERN_INFO "%s: link down\n", dev->name); netif_carrier_off(dev); txq_reclaim(dev, 1); } return; } if (port_status & PORT_SPEED_100) speed = 100; else speed = 10; duplex = (port_status & FULL_DUPLEX) ? 1 : 0; fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " "flow control %sabled\n", dev->name, speed, duplex ? "full" : "half", fc ? 
"en" : "dis"); if (!netif_carrier_ok(dev)) netif_carrier_on(dev); } static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct pxa168_eth_private *pep = netdev_priv(dev); if (unlikely(!pxa168_eth_collect_events(pep, dev))) return IRQ_NONE; /* Disable interrupts */ wrl(pep, INT_MASK, 0); napi_schedule(&pep->napi); return IRQ_HANDLED; } static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep) { int skb_size; /* * Reserve 2+14 bytes for an ethernet header (the hardware * automatically prepends 2 bytes of dummy data to each * received packet), 16 bytes for up to four VLAN tags, and * 4 bytes for the trailing FCS -- 36 bytes total. */ skb_size = pep->dev->mtu + 36; /* * Make sure that the skb size is a multiple of 8 bytes, as * the lower three bits of the receive descriptor's buffer * size field are ignored by the hardware. */ pep->skb_size = (skb_size + 7) & ~7; /* * If NET_SKB_PAD is smaller than a cache line, * netdev_alloc_skb() will cause skb->data to be misaligned * to a cache line boundary. If this is the case, include * some extra space to allow re-aligning the data area. */ pep->skb_size += SKB_DMA_REALIGN; } static int set_port_config_ext(struct pxa168_eth_private *pep) { int skb_size; pxa168_eth_recalc_skb_size(pep); if (pep->skb_size <= 1518) skb_size = PCXR_MFL_1518; else if (pep->skb_size <= 1536) skb_size = PCXR_MFL_1536; else if (pep->skb_size <= 2048) skb_size = PCXR_MFL_2048; else skb_size = PCXR_MFL_64K; /* Extended Port Configuration */ wrl(pep, PORT_CONFIG_EXT, PCXR_2BSM | /* Two byte prefix aligns IP hdr */ PCXR_DSCP_EN | /* Enable DSCP in IP */ skb_size | PCXR_FLP | /* do not force link pass */ PCXR_TX_HIGH_PRI); /* Transmit - high priority queue */ return 0; } static int pxa168_init_hw(struct pxa168_eth_private *pep) { int err = 0; /* Disable interrupts */ wrl(pep, INT_MASK, 0); wrl(pep, INT_CAUSE, 0); /* Write to ICR to clear interrupts. 
*/ wrl(pep, INT_W_CLEAR, 0); /* Abort any transmit and receive operations and put DMA * in idle state. */ abort_dma(pep); /* Initialize address hash table */ err = init_hash_table(pep); if (err) return err; /* SDMA configuration */ wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */ SDCR_RIFB | /* Rx interrupt on frame */ SDCR_BLMT | /* Little endian transmit */ SDCR_BLMR | /* Little endian receive */ SDCR_RC_MAX_RETRANS); /* Max retransmit count */ /* Port Configuration */ wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */ set_port_config_ext(pep); return err; } static int rxq_init(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct rx_desc *p_rx_desc; int size = 0, i = 0; int rx_desc_num = pep->rx_ring_size; /* Allocate RX skb rings */ pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size, GFP_KERNEL); if (!pep->rx_skb) return -ENOMEM; /* Allocate RX ring */ pep->rx_desc_count = 0; size = pep->rx_ring_size * sizeof(struct rx_desc); pep->rx_desc_area_size = size; pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, &pep->rx_desc_dma, GFP_KERNEL); if (!pep->p_rx_desc_area) { printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n", dev->name, size); goto out; } memset((void *)pep->p_rx_desc_area, 0, size); /* initialize the next_desc_ptr links in the Rx descriptors ring */ p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area; for (i = 0; i < rx_desc_num; i++) { p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma + ((i + 1) % rx_desc_num) * sizeof(struct rx_desc); } /* Save Rx desc pointer to driver struct. 
*/ pep->rx_curr_desc_q = 0; pep->rx_used_desc_q = 0; pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc); return 0; out: kfree(pep->rx_skb); return -ENOMEM; } static void rxq_deinit(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); int curr; /* Free preallocated skb's on RX rings */ for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { if (pep->rx_skb[curr]) { dev_kfree_skb(pep->rx_skb[curr]); pep->rx_desc_count--; } } if (pep->rx_desc_count) printk(KERN_ERR "Error in freeing Rx Ring. %d skb's still\n", pep->rx_desc_count); /* Free RX ring */ if (pep->p_rx_desc_area) dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size, pep->p_rx_desc_area, pep->rx_desc_dma); kfree(pep->rx_skb); } static int txq_init(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct tx_desc *p_tx_desc; int size = 0, i = 0; int tx_desc_num = pep->tx_ring_size; pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size, GFP_KERNEL); if (!pep->tx_skb) return -ENOMEM; /* Allocate TX ring */ pep->tx_desc_count = 0; size = pep->tx_ring_size * sizeof(struct tx_desc); pep->tx_desc_area_size = size; pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, &pep->tx_desc_dma, GFP_KERNEL); if (!pep->p_tx_desc_area) { printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", dev->name, size); goto out; } memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); /* Initialize the next_desc_ptr links in the Tx descriptors ring */ p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area; for (i = 0; i < tx_desc_num; i++) { p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma + ((i + 1) % tx_desc_num) * sizeof(struct tx_desc); } pep->tx_curr_desc_q = 0; pep->tx_used_desc_q = 0; pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc); return 0; out: kfree(pep->tx_skb); return -ENOMEM; } static void txq_deinit(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); /* Free 
outstanding skb's on TX ring */ txq_reclaim(dev, 1); BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q); /* Free TX ring */ if (pep->p_tx_desc_area) dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size, pep->p_tx_desc_area, pep->tx_desc_dma); kfree(pep->tx_skb); } static int pxa168_eth_open(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); int err; err = request_irq(dev->irq, pxa168_eth_int_handler, IRQF_DISABLED, dev->name, dev); if (err) { dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); return -EAGAIN; } pep->rx_resource_err = 0; err = rxq_init(dev); if (err != 0) goto out_free_irq; err = txq_init(dev); if (err != 0) goto out_free_rx_skb; pep->rx_used_desc_q = 0; pep->rx_curr_desc_q = 0; /* Fill RX ring with skb's */ rxq_refill(dev); pep->rx_used_desc_q = 0; pep->rx_curr_desc_q = 0; netif_carrier_off(dev); eth_port_start(dev); napi_enable(&pep->napi); return 0; out_free_rx_skb: rxq_deinit(dev); out_free_irq: free_irq(dev->irq, dev); return err; } static int pxa168_eth_stop(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); eth_port_reset(dev); /* Disable interrupts */ wrl(pep, INT_MASK, 0); wrl(pep, INT_CAUSE, 0); /* Write to ICR to clear interrupts. */ wrl(pep, INT_W_CLEAR, 0); napi_disable(&pep->napi); del_timer_sync(&pep->timeout); netif_carrier_off(dev); free_irq(dev->irq, dev); rxq_deinit(dev); txq_deinit(dev); return 0; } static int pxa168_eth_change_mtu(struct net_device *dev, int mtu) { int retval; struct pxa168_eth_private *pep = netdev_priv(dev); if ((mtu > 9500) || (mtu < 68)) return -EINVAL; dev->mtu = mtu; retval = set_port_config_ext(pep); if (!netif_running(dev)) return 0; /* * Stop and then re-open the interface. This will allocate RX * skbs of the new MTU. * There is a possible danger that the open will not succeed, * due to memory being full. 
*/ pxa168_eth_stop(dev); if (pxa168_eth_open(dev)) { dev_printk(KERN_ERR, &dev->dev, "fatal error on re-opening device after " "MTU change\n"); } return 0; } static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep) { int tx_desc_curr; tx_desc_curr = pep->tx_curr_desc_q; pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size; BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q); pep->tx_desc_count++; return tx_desc_curr; } static int pxa168_rx_poll(struct napi_struct *napi, int budget) { struct pxa168_eth_private *pep = container_of(napi, struct pxa168_eth_private, napi); struct net_device *dev = pep->dev; int work_done = 0; if (unlikely(pep->work_todo & WORK_LINK)) { pep->work_todo &= ~(WORK_LINK); handle_link_event(pep); } /* * We call txq_reclaim every time since in NAPI interupts are disabled * and due to this we miss the TX_DONE interrupt,which is not updated in * interrupt status register. */ txq_reclaim(dev, 0); if (netif_queue_stopped(dev) && pep->tx_ring_size - pep->tx_desc_count > 1) { netif_wake_queue(dev); } work_done = rxq_process(dev, budget); if (work_done < budget) { napi_complete(napi); wrl(pep, INT_MASK, ALL_INTS); } return work_done; } static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct tx_desc *desc; int tx_index; int length; tx_index = eth_alloc_tx_desc_index(pep); desc = &pep->p_tx_desc_area[tx_index]; length = skb->len; pep->tx_skb[tx_index] = skb; desc->byte_cnt = length; desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); skb_tx_timestamp(skb); wmb(); desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT; wmb(); wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); stats->tx_bytes += length; stats->tx_packets++; dev->trans_start = jiffies; if (pep->tx_ring_size - pep->tx_desc_count <= 1) { /* We handled the current skb, but now we 
are out of space.*/ netif_stop_queue(dev); } return NETDEV_TX_OK; } static int smi_wait_ready(struct pxa168_eth_private *pep) { int i = 0; /* wait for the SMI register to become available */ for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) { if (i == PHY_WAIT_ITERATIONS) return -ETIMEDOUT; msleep(10); } return 0; } static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum) { struct pxa168_eth_private *pep = bus->priv; int i = 0; int val; if (smi_wait_ready(pep)) { printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R); /* now wait for the data to be valid */ for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) { if (i == PHY_WAIT_ITERATIONS) { printk(KERN_WARNING "pxa168_eth: SMI bus read not valid\n"); return -ENODEV; } msleep(10); } return val & 0xffff; } static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value) { struct pxa168_eth_private *pep = bus->priv; if (smi_wait_ready(pep)) { printk(KERN_WARNING "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_W | (value & 0xffff)); if (smi_wait_ready(pep)) { printk(KERN_ERR "pxa168_eth: SMI bus busy timeout\n"); return -ETIMEDOUT; } return 0; } static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct pxa168_eth_private *pep = netdev_priv(dev); if (pep->phy != NULL) return phy_mii_ioctl(pep->phy, ifr, cmd); return -EOPNOTSUPP; } static struct phy_device *phy_scan(struct pxa168_eth_private *pep, int phy_addr) { struct mii_bus *bus = pep->smi_bus; struct phy_device *phydev; int start; int num; int i; if (phy_addr == PXA168_ETH_PHY_ADDR_DEFAULT) { /* Scan entire range */ start = ethernet_phy_get(pep); num = 32; } else { /* Use phy addr specific to platform */ start = phy_addr & 0x1f; num = 1; } phydev = NULL; for (i = 0; i < num; i++) { int addr = (start + i) & 0x1f; if (bus->phy_map[addr] == NULL) 
/*
 * NOTE(review): this view begins mid-function.  The signature and the scan
 * loop of phy_scan() sit above the visible chunk; only its tail appears
 * here.  Do not edit without the missing head in view.
 */
	mdiobus_scan(bus, addr);
	/* Fall back to the bus phy_map if the scan found nothing. */
	if (phydev == NULL) {
		phydev = bus->phy_map[addr];
		if (phydev != NULL)
			ethernet_phy_set_addr(pep, addr);
	}
	} /* closes a block opened above this view */

	return phydev;
}

/*
 * Attach and configure the PHY.  speed == 0 selects autonegotiation;
 * otherwise the given speed/duplex are forced.
 */
static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
{
	struct phy_device *phy = pep->phy;

	ethernet_phy_reset(pep);

	phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);

	if (speed == 0) {
		phy->autoneg = AUTONEG_ENABLE;
		phy->speed = 0;
		phy->duplex = 0;
		phy->supported &= PHY_BASIC_FEATURES;
		phy->advertising = phy->supported | ADVERTISED_Autoneg;
	} else {
		phy->autoneg = AUTONEG_DISABLE;
		phy->advertising = 0;
		phy->speed = speed;
		phy->duplex = duplex;
	}
	phy_start_aneg(phy);
}

/*
 * Board-level PHY bring-up: run the platform init hook, locate the PHY
 * (phy_addr masked to the 5-bit MDIO address space) and program the
 * unicast hash entry for the current MAC address.
 * NOTE(review): pep->pd is dereferenced without a NULL check -- platform
 * data is assumed to always be supplied; verify against board code.
 */
static int ethernet_phy_setup(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->pd->init)
		pep->pd->init();
	pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
	if (pep->phy != NULL)
		phy_init(pep, pep->pd->speed, pep->pd->duplex);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	return 0;
}

/* ethtool get_settings: refresh link state, then report it. */
static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = phy_read_status(pep->phy);
	if (err == 0)
		err = phy_ethtool_gset(pep->phy, cmd);

	return err;
}

/* ethtool set_settings: forwarded straight to phylib. */
static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	return phy_ethtool_sset(pep->phy, cmd);
}

/*
 * ethtool get_drvinfo.
 * NOTE(review): strncpy() does not guarantee NUL termination when the
 * source fills the 32-byte field -- confirm DRIVER_NAME/DRIVER_VERSION are
 * short enough, or that callers tolerate unterminated fields.
 */
static void pxa168_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRIVER_NAME, 32);
	strncpy(info->version, DRIVER_VERSION, 32);
	strncpy(info->fw_version, "N/A", 32);
	strncpy(info->bus_info, "N/A", 32);
}

static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_settings	= pxa168_get_settings,
	.set_settings	= pxa168_set_settings,
	.get_drvinfo	= pxa168_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open		= pxa168_eth_open,
	.ndo_stop		= pxa168_eth_stop,
	.ndo_start_xmit		= pxa168_eth_start_xmit,
	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= pxa168_eth_do_ioctl,
	.ndo_change_mtu		= pxa168_eth_change_mtu,
	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
};

/*
 * Platform probe: acquire the MFU clock, allocate and wire up the
 * net_device, map the register bank, set up the SMI MDIO bus, locate the
 * PHY and finally register the netdev.  Errors unwind in reverse order
 * through the goto ladder.
 */
static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	clk = clk_get(&pdev->dev, "MFUCLK");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "%s: Fast Ethernet failed to get clock\n",
		       DRIVER_NAME);
		return -ENODEV;
	}
	clk_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		err = -ENODEV;
		goto err_netdev;
	}
	pep->base = ioremap(res->start, resource_size(res));
	if (pep->base == NULL) {
		err = -ENOMEM;
		goto err_netdev;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
	eth_hw_addr_random(dev);

	pep->pd = pdev->dev.platform_data;
	pep->rx_ring_size = NUM_RX_DESCS;
	if (pep->pd->rx_queue_size)
		pep->rx_ring_size = pep->pd->rx_queue_size;

	pep->tx_ring_size = NUM_TX_DESCS;
	if (pep->pd->tx_queue_size)
		pep->tx_ring_size = pep->pd->tx_queue_size;

	pep->port_num = pep->pd->port_number;
	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	/* Timer used to re-fill the RX ring when allocation failed. */
	memset(&pep->timeout, 0, sizeof(struct timer_list));
	init_timer(&pep->timeout);
	pep->timeout.function = rxq_refill_timer_wrapper;
	pep->timeout.data = (unsigned long)pep;

	pep->smi_bus = mdiobus_alloc();
	if (pep->smi_bus == NULL) {
		err = -ENOMEM;
		goto err_base;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		pdev->name, pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	pxa168_init_hw(pep);
	err = ethernet_phy_setup(dev);
	if (err)
		goto err_mdiobus;
	SET_NETDEV_DEV(dev, &pdev->dev);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_base:
	iounmap(pep->base);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable(clk);
	clk_put(clk);
	return err;
}

/*
 * Platform remove: free the hash table, release the clock, detach the PHY
 * and tear down the MDIO bus and netdev.
 * NOTE(review): unregister_netdev() runs after iounmap(pep->base) and
 * mdiobus_unregister() -- the device could still be open at that point;
 * verify this ordering is safe (upstream later reordered this path).
 */
static int pxa168_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->htpr) {
		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
				  pep->htpr, pep->htpr_dma);
		pep->htpr = NULL;
	}
	if (pep->clk) {
		clk_disable(pep->clk);
		clk_put(pep->clk);
		pep->clk = NULL;
	}
	if (pep->phy != NULL)
		phy_detach(pep->phy);

	iounmap(pep->base);
	pep->base = NULL;
	mdiobus_unregister(pep->smi_bus);
	mdiobus_free(pep->smi_bus);
	unregister_netdev(dev);
	cancel_work_sync(&pep->tx_timeout_task);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

/* Quiesce the MAC on shutdown/kexec. */
static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	eth_port_reset(dev);
}

#ifdef CONFIG_PM
/* Suspend/resume are not implemented; report that explicitly. */
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif

static struct platform_driver pxa168_eth_driver = {
	.probe = pxa168_eth_probe,
	.remove = pxa168_eth_remove,
	.shutdown = pxa168_eth_shutdown,
	.resume = pxa168_eth_resume,
	.suspend = pxa168_eth_suspend,
	.driver = {
		.name	= DRIVER_NAME,
	},
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");
gpl-2.0
s9yobena/CSM-ANDROID
drivers/net/wireless/wl12xx/testmode.c
4806
7414
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include "testmode.h"

#include <linux/slab.h>
#include <net/genetlink.h>

#include "wl12xx.h"
#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "ps.h"
#include "io.h"

/* Upper bound on the payload carried in WL1271_TM_ATTR_DATA. */
#define WL1271_TM_MAX_DATA_LENGTH 1024

/* nl80211 testmode sub-command ids; values are ABI and must not move. */
enum wl1271_tm_commands {
	WL1271_TM_CMD_UNSPEC,
	WL1271_TM_CMD_TEST,
	WL1271_TM_CMD_INTERROGATE,
	WL1271_TM_CMD_CONFIGURE,
	WL1271_TM_CMD_NVS_PUSH,		/* Not in use. Keep to not break ABI */
	WL1271_TM_CMD_SET_PLT_MODE,
	WL1271_TM_CMD_RECOVER,
	WL1271_TM_CMD_GET_MAC,

	__WL1271_TM_CMD_AFTER_LAST
};
#define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1)

/* Netlink attributes accepted/emitted by the testmode interface. */
enum wl1271_tm_attrs {
	WL1271_TM_ATTR_UNSPEC,
	WL1271_TM_ATTR_CMD_ID,
	WL1271_TM_ATTR_ANSWER,
	WL1271_TM_ATTR_DATA,
	WL1271_TM_ATTR_IE_ID,
	WL1271_TM_ATTR_PLT_MODE,

	__WL1271_TM_ATTR_AFTER_LAST
};
#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)

static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
	[WL1271_TM_ATTR_CMD_ID] =	{ .type = NLA_U32 },
	[WL1271_TM_ATTR_ANSWER] =	{ .type = NLA_U8 },
	[WL1271_TM_ATTR_DATA] =		{ .type = NLA_BINARY,
					  .len = WL1271_TM_MAX_DATA_LENGTH },
	[WL1271_TM_ATTR_IE_ID] =	{ .type = NLA_U32 },
	[WL1271_TM_ATTR_PLT_MODE] =	{ .type = NLA_U32 },
};

/*
 * WL1271_TM_CMD_TEST: forward an opaque test buffer to the firmware.
 * If ATTR_ANSWER is set, the firmware reply is sent back to userspace
 * as ATTR_DATA in a testmode reply skb.
 */
static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
{
	int buf_len, ret, len;
	struct sk_buff *skb;
	void *buf;
	u8 answer = 0;

	wl1271_debug(DEBUG_TESTMODE, "testmode cmd test");

	if (!tb[WL1271_TM_ATTR_DATA])
		return -EINVAL;

	buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
	buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);

	if (tb[WL1271_TM_ATTR_ANSWER])
		answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);

	/* the buffer is reused as a wl1271_command, so bound it */
	if (buf_len > sizeof(struct wl1271_command))
		return -EMSGSIZE;

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF) {
		ret = -EINVAL;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_cmd_test(wl, buf, buf_len, answer);
	if (ret < 0) {
		wl1271_warning("testmode cmd test failed: %d", ret);
		goto out_sleep;
	}

	if (answer) {
		len = nla_total_size(buf_len);
		skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
		if (!skb) {
			ret = -ENOMEM;
			goto out_sleep;
		}

		NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
		ret = cfg80211_testmode_reply(skb);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;

nla_put_failure:
	kfree_skb(skb);
	ret = -EMSGSIZE;
	goto out_sleep;
}

/*
 * WL1271_TM_CMD_INTERROGATE: read an ACX information element from the
 * firmware and return it to userspace.
 * NOTE(review): ATTR_IE_ID is declared NLA_U32 in the policy but read with
 * nla_get_u8() here -- only the low byte is used; confirm this is intended.
 */
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
{
	int ret;
	struct wl1271_command *cmd;
	struct sk_buff *skb;
	u8 ie_id;

	wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate");

	if (!tb[WL1271_TM_ATTR_IE_ID])
		return -EINVAL;

	ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF) {
		ret = -EINVAL;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		ret = -ENOMEM;
		goto out_sleep;
	}

	ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
	if (ret < 0) {
		wl1271_warning("testmode cmd interrogate failed: %d", ret);
		goto out_free;
	}

	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
	if (!skb) {
		ret = -ENOMEM;
		goto out_free;
	}

	NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
	ret = cfg80211_testmode_reply(skb);
	if (ret < 0)
		goto out_free;

out_free:
	kfree(cmd);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;

nla_put_failure:
	kfree_skb(skb);
	ret = -EMSGSIZE;
	goto out_free;
}

/*
 * WL1271_TM_CMD_CONFIGURE: write an ACX information element to the
 * firmware.  Same nla_get_u8()-on-NLA_U32 caveat as interrogate above.
 */
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
{
	int buf_len, ret;
	void *buf;
	u8 ie_id;

	wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure");

	if (!tb[WL1271_TM_ATTR_DATA])
		return -EINVAL;
	if (!tb[WL1271_TM_ATTR_IE_ID])
		return -EINVAL;

	ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
	buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
	buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);

	if (buf_len > sizeof(struct wl1271_command))
		return -EMSGSIZE;

	mutex_lock(&wl->mutex);
	ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len);
	mutex_unlock(&wl->mutex);

	if (ret < 0) {
		wl1271_warning("testmode cmd configure failed: %d", ret);
		return ret;
	}

	return 0;
}

/* WL1271_TM_CMD_SET_PLT_MODE: 0 = leave PLT mode, 1 = enter PLT mode. */
static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
{
	u32 val;
	int ret;

	wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode");

	if (!tb[WL1271_TM_ATTR_PLT_MODE])
		return -EINVAL;

	val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);

	switch (val) {
	case 0:
		ret = wl1271_plt_stop(wl);
		break;
	case 1:
		ret = wl1271_plt_start(wl);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* WL1271_TM_CMD_RECOVER: queue a firmware recovery from userspace. */
static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
{
	wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");

	wl12xx_queue_recovery_work(wl);

	return 0;
}

/*
 * WL1271_TM_CMD_GET_MAC: assemble the factory MAC from the fused OUI/NIC
 * words and return it.  Only valid while the chip is in PLT mode.
 */
static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
{
	struct sk_buff *skb;
	u8 mac_addr[ETH_ALEN];
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (!wl->plt) {
		ret = -EINVAL;
		goto out;
	}

	if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	mac_addr[0] = (u8)(wl->fuse_oui_addr >> 16);
	mac_addr[1] = (u8)(wl->fuse_oui_addr >> 8);
	mac_addr[2] = (u8) wl->fuse_oui_addr;
	mac_addr[3] = (u8)(wl->fuse_nic_addr >> 16);
	mac_addr[4] = (u8)(wl->fuse_nic_addr >> 8);
	mac_addr[5] = (u8) wl->fuse_nic_addr;

	skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, ETH_ALEN);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr);
	ret = cfg80211_testmode_reply(skb);
	if (ret < 0)
		goto out;

out:
	mutex_unlock(&wl->mutex);
	return ret;

nla_put_failure:
	kfree_skb(skb);
	ret = -EMSGSIZE;
	goto out;
}

/*
 * Entry point from mac80211 testmode: parse the attribute table and
 * dispatch on ATTR_CMD_ID.
 */
int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
{
	struct wl1271 *wl = hw->priv;
	struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
	if (err)
		return err;

	if (!tb[WL1271_TM_ATTR_CMD_ID])
		return -EINVAL;

	switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
	case WL1271_TM_CMD_TEST:
		return wl1271_tm_cmd_test(wl, tb);
	case WL1271_TM_CMD_INTERROGATE:
		return wl1271_tm_cmd_interrogate(wl, tb);
	case WL1271_TM_CMD_CONFIGURE:
		return wl1271_tm_cmd_configure(wl, tb);
	case WL1271_TM_CMD_SET_PLT_MODE:
		return wl1271_tm_cmd_set_plt_mode(wl, tb);
	case WL1271_TM_CMD_RECOVER:
		return wl1271_tm_cmd_recover(wl, tb);
	case WL1271_TM_CMD_GET_MAC:
		return wl12xx_tm_cmd_get_mac(wl, tb);
	default:
		return -EOPNOTSUPP;
	}
}
gpl-2.0
LeonardKoenig/android_kernel_sony_msm8x60-caf
drivers/media/video/hexium_orion.c
5062
12928
/*
    hexium_orion.c - v4l2 driver for the Hexium Orion frame grabber cards

    Visit http://www.mihu.de/linux/saa7146/ and follow the link to "hexium"
    for further details about this card.

    Copyright (C) 2003 Michael Hunold <michael@mihu.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DEBUG_VARIABLE debug

#include <media/saa7146_vv.h>
#include <linux/module.h>

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debug verbosity");

/* global variables */
static int hexium_num;

/* Board variants distinguished by PCI subsystem ids / SAA7110 probe. */
#define HEXIUM_HV_PCI6_ORION		1
#define HEXIUM_ORION_1SVHS_3BNC		2
#define HEXIUM_ORION_4BNC		3

#define HEXIUM_INPUTS	9
static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = {
	{ 0, "CVBS 1",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 1, "CVBS 2",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 2, "CVBS 3",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 3, "CVBS 4",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 4, "CVBS 5",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 5, "CVBS 6",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 6, "Y/C 1",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 7, "Y/C 2",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 8, "Y/C 3",	V4L2_INPUT_TYPE_CAMERA,	2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
};

#define HEXIUM_AUDIOS	0

/* One SAA7110 register write: sub-address + value. */
struct hexium_data
{
	s8 adr;
	u8 byte;
};

/* Per-card driver state, stored in saa7146_dev->ext_priv. */
struct hexium
{
	int type;
	struct video_device	*video_dev;
	struct i2c_adapter	i2c_adapter;

	int cur_input;	/* current input */
};

/* Philips SAA7110 decoder default registers */
static u8 hexium_saa7110[53]={
/*00*/ 0x4C,0x3C,0x0D,0xEF,0xBD,0xF0,0x00,0x00,
/*08*/ 0xF8,0xF8,0x60,0x60,0x40,0x86,0x18,0x90,
/*10*/ 0x00,0x2C,0x40,0x46,0x42,0x1A,0xFF,0xDA,
/*18*/ 0xF0,0x8B,0x00,0x00,0x00,0x00,0x00,0x00,
/*20*/ 0xD9,0x17,0x40,0x41,0x80,0x41,0x80,0x4F,
/*28*/ 0xFE,0x01,0x0F,0x0F,0x03,0x01,0x81,0x03,
/*30*/ 0x44,0x75,0x01,0x8C,0x03
};

/*
 * SAA7110 register sequences selecting each of the nine inputs.
 * Values are hardware-specific; the trailing "// 0x.." comments record
 * alternative values the original author experimented with.
 */
static struct {
	struct hexium_data data[8];
} hexium_input_select[] = {
	{
		{ /* cvbs 1 */
			{ 0x06, 0x00 },
			{ 0x20, 0xD9 },
			{ 0x21, 0x17 }, // 0x16,
			{ 0x22, 0x40 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 }, // ??
			{ 0x21, 0x16 }, // 0x03,
		}
	}, {
		{ /* cvbs 2 */
			{ 0x06, 0x00 },
			{ 0x20, 0x78 },
			{ 0x21, 0x07 }, // 0x03,
			{ 0x22, 0xD2 },
			{ 0x2C, 0x83 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 }, // ?
			{ 0x21, 0x03 },
		}
	}, {
		{ /* cvbs 3 */
			{ 0x06, 0x00 },
			{ 0x20, 0xBA },
			{ 0x21, 0x07 }, // 0x05,
			{ 0x22, 0x91 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 }, // ??
			{ 0x21, 0x05 }, // 0x03,
		}
	}, {
		{ /* cvbs 4 */
			{ 0x06, 0x00 },
			{ 0x20, 0xD8 },
			{ 0x21, 0x17 }, // 0x16,
			{ 0x22, 0x40 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 }, // ??
			{ 0x21, 0x16 }, // 0x03,
		}
	}, {
		{ /* cvbs 5 */
			{ 0x06, 0x00 },
			{ 0x20, 0xB8 },
			{ 0x21, 0x07 }, // 0x05,
			{ 0x22, 0x91 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 }, // ??
			{ 0x21, 0x05 }, // 0x03,
		}
	}, {
		{ /* cvbs 6 */
			{ 0x06, 0x00 },
			{ 0x20, 0x7C },
			{ 0x21, 0x07 }, // 0x03
			{ 0x22, 0xD2 },
			{ 0x2C, 0x83 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 }, // ??
			{ 0x21, 0x03 },
		}
	}, {
		{ /* y/c 1 */
			{ 0x06, 0x80 },
			{ 0x20, 0x59 },
			{ 0x21, 0x17 },
			{ 0x22, 0x42 },
			{ 0x2C, 0xA3 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 },
			{ 0x21, 0x12 },
		}
	}, {
		{ /* y/c 2 */
			{ 0x06, 0x80 },
			{ 0x20, 0x9A },
			{ 0x21, 0x17 },
			{ 0x22, 0xB1 },
			{ 0x2C, 0x13 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 },
			{ 0x21, 0x14 },
		}
	}, {
		{ /* y/c 3 */
			{ 0x06, 0x80 },
			{ 0x20, 0x3C },
			{ 0x21, 0x27 },
			{ 0x22, 0xC1 },
			{ 0x2C, 0x23 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 },
			{ 0x21, 0x21 },
		}
	}
};

static struct saa7146_standard hexium_standards[] = {
	{
		.name	= "PAL", 	.id	= V4L2_STD_PAL,
		.v_offset	= 16,	.v_field 	= 288,
		.h_offset	= 1,	.h_pixels 	= 680,
		.v_max_out	= 576,	.h_max_out	= 768,
	}, {
		.name	= "NTSC", 	.id	= V4L2_STD_NTSC,
		.v_offset	= 16,	.v_field 	= 240,
		.h_offset	= 1,	.h_pixels 	= 640,
		.v_max_out	= 480,	.h_max_out	= 640,
	}, {
		.name	= "SECAM", 	.id	= V4L2_STD_SECAM,
		.v_offset	= 16,	.v_field 	= 288,
		.h_offset	= 1,	.h_pixels 	= 720,
		.v_max_out	= 576,	.h_max_out	= 768,
	}
};

/* this is only called for old HV-PCI6/Orion cards without eeprom */
static int hexium_probe(struct saa7146_dev *dev)
{
	struct hexium *hexium = NULL;
	union i2c_smbus_data data;
	int err = 0;

	DEB_EE("\n");

	/* there are no hexium orion cards with revision 0 saa7146s */
	/* NOTE(review): -EFAULT is an odd errno for "not my card"; kept as-is */
	if (0 == dev->revision) {
		return -EFAULT;
	}

	hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
	if (NULL == hexium) {
		pr_err("hexium_probe: not enough kernel memory\n");
		return -ENOMEM;
	}

	/* enable i2c-port pins */
	saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));

	saa7146_write(dev, DD1_INIT, 0x01000100);
	saa7146_write(dev, DD1_STREAM_B, 0x00000000);
	saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));

	hexium->i2c_adapter = (struct i2c_adapter) {
		.name = "hexium orion",
	};
	saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
	if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
		DEB_S("cannot register i2c-device. skipping.\n");
		kfree(hexium);
		return -EFAULT;
	}

	/* set SAA7110 control GPIO 0 */
	saa7146_setgpio(dev, 0, SAA7146_GPIO_OUTHI);
	/*  set HWControl GPIO number 2 */
	saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);

	mdelay(10);

	/* detect newer Hexium Orion cards by subsystem ids */
	if (0x17c8 == dev->pci->subsystem_vendor && 0x0101 == dev->pci->subsystem_device) {
		pr_info("device is a Hexium Orion w/ 1 SVHS + 3 BNC inputs\n");
		/* we store the pointer in our private data field */
		dev->ext_priv = hexium;
		hexium->type = HEXIUM_ORION_1SVHS_3BNC;
		return 0;
	}

	if (0x17c8 == dev->pci->subsystem_vendor && 0x2101 == dev->pci->subsystem_device) {
		pr_info("device is a Hexium Orion w/ 4 BNC inputs\n");
		/* we store the pointer in our private data field */
		dev->ext_priv = hexium;
		hexium->type = HEXIUM_ORION_4BNC;
		return 0;
	}

	/* check if this is an old hexium Orion card by looking at
	   a saa7110 at address 0x4e */
	if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
		pr_info("device is a Hexium HV-PCI6/Orion (old)\n");
		/* we store the pointer in our private data field */
		dev->ext_priv = hexium;
		hexium->type = HEXIUM_HV_PCI6_ORION;
		return 0;
	}

	i2c_del_adapter(&hexium->i2c_adapter);
	kfree(hexium);
	return -EFAULT;
}

/* bring hardware to a sane state. this has to be done, just in case someone
   wants to capture from this device before it has been properly initialized.
   the capture engine would badly fail, because no valid signal arrives on the
   saa7146, thus leading to timeouts and stuff. */
static int hexium_init_done(struct saa7146_dev *dev)
{
	struct hexium *hexium = (struct hexium *) dev->ext_priv;
	union i2c_smbus_data data;
	int i = 0;

	DEB_D("hexium_init_done called\n");

	/* initialize the helper ics to useful values */
	for (i = 0; i < sizeof(hexium_saa7110); i++) {
		data.byte = hexium_saa7110[i];
		if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) {
			pr_err("failed for address 0x%02x\n", i);
		}
	}

	return 0;
}

/* Program the SAA7110 register sequence that routes the given input. */
static int hexium_set_input(struct hexium *hexium, int input)
{
	union i2c_smbus_data data;
	int i = 0;

	DEB_D("\n");

	for (i = 0; i < 8; i++) {
		int adr = hexium_input_select[input].data[i].adr;
		data.byte = hexium_input_select[input].data[i].byte;
		if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, adr, I2C_SMBUS_BYTE_DATA, &data)) {
			return -1;
		}
		pr_debug("%d: 0x%02x => 0x%02x\n", input, adr, data.byte);
	}

	return 0;
}

/* VIDIOC_ENUMINPUT: copy the static input description table entry. */
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
	DEB_EE("VIDIOC_ENUMINPUT %d\n", i->index);

	if (i->index >= HEXIUM_INPUTS)
		return -EINVAL;

	memcpy(i, &hexium_inputs[i->index], sizeof(struct v4l2_input));

	DEB_D("v4l2_ioctl: VIDIOC_ENUMINPUT %d\n", i->index);
	return 0;
}

/* VIDIOC_G_INPUT: report the currently selected input. */
static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct hexium *hexium = (struct hexium *) dev->ext_priv;

	*input = hexium->cur_input;

	DEB_D("VIDIOC_G_INPUT: %d\n", *input);
	return 0;
}

/* VIDIOC_S_INPUT: select one of the nine inputs. */
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
	struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
	struct hexium *hexium = (struct hexium *) dev->ext_priv;

	if (input >= HEXIUM_INPUTS)
		return -EINVAL;

	hexium->cur_input = input;
	hexium_set_input(hexium, input);

	return 0;
}

static struct saa7146_ext_vv vv_data;

/* this function only gets called when the probing was successful */
static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
	struct hexium *hexium = (struct hexium *) dev->ext_priv;

	DEB_EE("\n");

	saa7146_vv_init(dev, &vv_data);
	vv_data.ops.vidioc_enum_input = vidioc_enum_input;
	vv_data.ops.vidioc_g_input = vidioc_g_input;
	vv_data.ops.vidioc_s_input = vidioc_s_input;
	if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
		pr_err("cannot register capture v4l2 device. skipping.\n");
		return -1;
	}

	pr_err("found 'hexium orion' frame grabber-%d\n", hexium_num);
	hexium_num++;

	/* the rest */
	hexium->cur_input = 0;
	hexium_init_done(dev);

	return 0;
}

/* Tear down the v4l2 device and i2c adapter registered at attach/probe. */
static int hexium_detach(struct saa7146_dev *dev)
{
	struct hexium *hexium = (struct hexium *) dev->ext_priv;

	DEB_EE("dev:%p\n", dev);

	saa7146_unregister_device(&hexium->video_dev, dev);
	saa7146_vv_release(dev);

	hexium_num--;

	i2c_del_adapter(&hexium->i2c_adapter);
	kfree(hexium);
	return 0;
}

/* No per-standard hardware setup is required for this card. */
static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *std)
{
	return 0;
}

static struct saa7146_extension extension;

static struct saa7146_pci_extension_data hexium_hv_pci6 = {
	.ext_priv = "Hexium HV-PCI6 / Orion",
	.ext = &extension,
};

static struct saa7146_pci_extension_data hexium_orion_1svhs_3bnc = {
	.ext_priv = "Hexium HV-PCI6 / Orion (1 SVHS/3 BNC)",
	.ext = &extension,
};

static struct saa7146_pci_extension_data hexium_orion_4bnc = {
	.ext_priv = "Hexium HV-PCI6 / Orion (4 BNC)",
	.ext = &extension,
};

static struct pci_device_id pci_tbl[] = {
	{
		.vendor = PCI_VENDOR_ID_PHILIPS,
		.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
		.subvendor = 0x0000,
		.subdevice = 0x0000,
		.driver_data = (unsigned long) &hexium_hv_pci6,
	}, {
		.vendor = PCI_VENDOR_ID_PHILIPS,
		.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
		.subvendor = 0x17c8,
		.subdevice = 0x0101,
		.driver_data = (unsigned long) &hexium_orion_1svhs_3bnc,
	}, {
		.vendor = PCI_VENDOR_ID_PHILIPS,
		.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
		.subvendor = 0x17c8,
		.subdevice = 0x2101,
		.driver_data = (unsigned long) &hexium_orion_4bnc,
	}, {
		.vendor = 0,
	}
};

MODULE_DEVICE_TABLE(pci, pci_tbl);

static struct saa7146_ext_vv vv_data = {
	.inputs = HEXIUM_INPUTS,
	.capabilities = 0,
	.stds = &hexium_standards[0],
	.num_stds = sizeof(hexium_standards) / sizeof(struct saa7146_standard),
	.std_callback = &std_callback,
};

static struct saa7146_extension extension = {
	.name = "hexium HV-PCI6 Orion",
	.flags = 0,		// SAA7146_USE_I2C_IRQ,

	.pci_tbl = &pci_tbl[0],
	.module = THIS_MODULE,

	.probe = hexium_probe,
	.attach = hexium_attach,
	.detach = hexium_detach,

	.irq_mask = 0,
	.irq_func = NULL,
};

static int __init hexium_init_module(void)
{
	if (0 != saa7146_register_extension(&extension)) {
		DEB_S("failed to register extension\n");
		return -ENODEV;
	}

	return 0;
}

static void __exit hexium_cleanup_module(void)
{
	saa7146_unregister_extension(&extension);
}

module_init(hexium_init_module);
module_exit(hexium_cleanup_module);

MODULE_DESCRIPTION("video4linux-2 driver for Hexium Orion frame grabber cards");
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_LICENSE("GPL");
gpl-2.0
smac0628/htc_gpe_51
arch/sh/kernel/cpu/sh4/setup-sh7760.c
5062
8901
/*
 * SH7760 Setup
 *
 *  Copyright (C) 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/sh_timer.h>
#include <linux/serial_sci.h>
#include <linux/io.h>

/* Interrupt source and group identifiers for the SH7760 INTC. */
enum {
	UNUSED = 0,

	/* interrupt sources */
	IRL0, IRL1, IRL2, IRL3,
	HUDI, GPIOI, DMAC,
	IRQ4, IRQ5, IRQ6, IRQ7,
	HCAN20, HCAN21,
	SSI0, SSI1,
	HAC0, HAC1,
	I2C0, I2C1,
	USB, LCDC,
	DMABRG0, DMABRG1, DMABRG2,
	SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
	SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
	SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
	SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
	HSPI,
	MMCIF0, MMCIF1, MMCIF2, MMCIF3,
	MFI, ADC, CMT,
	TMU0, TMU1, TMU2,
	WDT, REF,

	/* interrupt groups */
	DMABRG, SCIF0, SCIF1, SCIF2, SIM, MMCIF,
};

/*
 * Exception-vector table.  Values come from the SH7760 hardware manual.
 * NOTE(review): IRQ6 is listed twice (0x840 and 0x860) while IRQ7 has no
 * vector -- 0x860 looks like it should be IRQ7; verify against the
 * datasheet before relying on IRQ7.
 */
static struct intc_vect vectors[] __initdata = {
	INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
	INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
	INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
	INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
	INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0),
	INTC_VECT(DMAC, 0x6c0),
	INTC_VECT(IRQ4, 0x800), INTC_VECT(IRQ5, 0x820),
	INTC_VECT(IRQ6, 0x840), INTC_VECT(IRQ6, 0x860),
	INTC_VECT(HCAN20, 0x900), INTC_VECT(HCAN21, 0x920),
	INTC_VECT(SSI0, 0x940), INTC_VECT(SSI1, 0x960),
	INTC_VECT(HAC0, 0x980), INTC_VECT(HAC1, 0x9a0),
	INTC_VECT(I2C0, 0x9c0), INTC_VECT(I2C1, 0x9e0),
	INTC_VECT(USB, 0xa00), INTC_VECT(LCDC, 0xa20),
	INTC_VECT(DMABRG0, 0xa80), INTC_VECT(DMABRG1, 0xaa0),
	INTC_VECT(DMABRG2, 0xac0),
	INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0),
	INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0),
	INTC_VECT(SCIF1_ERI, 0xb00), INTC_VECT(SCIF1_RXI, 0xb20),
	INTC_VECT(SCIF1_BRI, 0xb40), INTC_VECT(SCIF1_TXI, 0xb60),
	INTC_VECT(SCIF2_ERI, 0xb80), INTC_VECT(SCIF2_RXI, 0xba0),
	INTC_VECT(SCIF2_BRI, 0xbc0), INTC_VECT(SCIF2_TXI, 0xbe0),
	INTC_VECT(SIM_ERI, 0xc00), INTC_VECT(SIM_RXI, 0xc20),
	INTC_VECT(SIM_TXI, 0xc40), INTC_VECT(SIM_TEI, 0xc60),
	INTC_VECT(HSPI, 0xc80),
	INTC_VECT(MMCIF0, 0xd00), INTC_VECT(MMCIF1, 0xd20),
	INTC_VECT(MMCIF2, 0xd40), INTC_VECT(MMCIF3, 0xd60),
	INTC_VECT(MFI, 0xe80), /* 0xf80 according to data sheet */
	INTC_VECT(ADC, 0xf80), INTC_VECT(CMT, 0xfa0),
	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
	INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
	INTC_VECT(WDT, 0x560),
	INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0),
};

/* Groups collapse the per-event sources of a peripheral to one IRQ. */
static struct intc_group groups[] __initdata = {
	INTC_GROUP(DMABRG, DMABRG0, DMABRG1, DMABRG2),
	INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
	INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
	INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
	INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI),
	INTC_GROUP(MMCIF, MMCIF0, MMCIF1, MMCIF2, MMCIF3),
};

/* Mask/mask-clear register pairs and the bit layout of each. */
static struct intc_mask_reg mask_registers[] __initdata = {
	{ 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
	  { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
	    SSI0, SSI1, HAC0, HAC1, I2C0, I2C1, USB, LCDC,
	    0, DMABRG0, DMABRG1, DMABRG2,
	    SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
	    SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
	    SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, } },
	{ 0xfe080044, 0xfe080064, 32, /* INTMSK04 / INTMSKCLR04 */
	  { 0, 0, 0, 0, 0, 0, 0, 0,
	    SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
	    HSPI, MMCIF0, MMCIF1, MMCIF2,
	    MMCIF3, 0, 0, 0, 0, 0, 0, 0,
	    0, MFI, 0, 0, 0, 0, ADC, CMT, } },
};

/* Priority registers: address, field width and which sources they hold. */
static struct intc_prio_reg prio_registers[] __initdata = {
	{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
	{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
	{ 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, 0, HUDI } },
	{ 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
	{ 0xfe080000, 0, 32, 4, /* INTPRI00 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
	{ 0xfe080004, 0, 32, 4, /* INTPRI04 */ { HCAN20, HCAN21, SSI0, SSI1,
						 HAC0, HAC1, I2C0, I2C1 } },
	{ 0xfe080008, 0, 32, 4, /* INTPRI08 */ { USB, LCDC, DMABRG, SCIF0,
						 SCIF1, SCIF2, SIM, HSPI } },
	{ 0xfe08000c, 0, 32, 4, /* INTPRI0C */ { 0, 0, MMCIF, 0,
						 MFI, 0, ADC, CMT } },
};

static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
			 mask_registers, prio_registers, NULL);

/* Vectors used instead of the IRL encodings when IRQ mode is selected. */
static struct intc_vect vectors_irq[] __initdata = {
	INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
	INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
};

static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
			 mask_registers, prio_registers, NULL);

/* SCIF0..2 are full SCIF ports; SCIF3 is a plain SCI (see below). */
static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xfe600000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 52, 53, 55, 54 },
	.regtype	= SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};

static struct platform_device scif0_device = {
	.name		= "sh-sci",
	.id		= 0,
	.dev		= {
		.platform_data	= &scif0_platform_data,
	},
};

static struct plat_sci_port scif1_platform_data = {
	.mapbase	= 0xfe610000,
	.flags		= UPF_BOOT_AUTOCONF,
	.type		= PORT_SCIF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.irqs		= { 72, 73, 75, 74 },
	.regtype	= SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};

static struct platform_device scif1_device = {
	.name		= "sh-sci",
	.id		= 1,
	.dev		= {
		.platform_data	= &scif1_platform_data,
	},
};

static struct plat_sci_port scif2_platform_data = {
	.mapbase	= 0xfe620000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 76, 77, 79, 78 },
	.regtype	= SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};

static struct platform_device scif2_device = {
	.name		= "sh-sci",
	.id		= 2,
	.dev		= {
		.platform_data	= &scif2_platform_data,
	},
};

static struct plat_sci_port scif3_platform_data = {
	.mapbase	= 0xfe480000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_REIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCI,
	.irqs		= { 80, 81, 82, 0 },
	.regshift	= 2,
};

static struct platform_device scif3_device = {
	.name		= "sh-sci",
	.id		= 3,
	.dev		= {
		.platform_data	= &scif3_platform_data,
	},
};

/* TMU0 drives the clockevent; TMU1 the clocksource; TMU2 is spare. */
static struct sh_timer_config tmu0_platform_data = {
	.channel_offset = 0x04,
	.timer_bit = 0,
	.clockevent_rating = 200,
};

static struct resource tmu0_resources[] = {
	[0] = {
		.start	= 0xffd80008,
		.end	= 0xffd80013,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 16,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu0_device = {
	.name		= "sh_tmu",
	.id		= 0,
	.dev = {
		.platform_data	= &tmu0_platform_data,
	},
	.resource	= tmu0_resources,
	.num_resources	= ARRAY_SIZE(tmu0_resources),
};

static struct sh_timer_config tmu1_platform_data = {
	.channel_offset = 0x10,
	.timer_bit = 1,
	.clocksource_rating = 200,
};

static struct resource tmu1_resources[] = {
	[0] = {
		.start	= 0xffd80014,
		.end	= 0xffd8001f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 17,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu1_device = {
	.name		= "sh_tmu",
	.id		= 1,
	.dev = {
		.platform_data	= &tmu1_platform_data,
	},
	.resource	= tmu1_resources,
	.num_resources	= ARRAY_SIZE(tmu1_resources),
};

static struct sh_timer_config tmu2_platform_data = {
	.channel_offset = 0x1c,
	.timer_bit = 2,
};

static struct resource tmu2_resources[] = {
	[0] = {
		.start	= 0xffd80020,
		.end	= 0xffd8002f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 18,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device tmu2_device = {
	.name		= "sh_tmu",
	.id		= 2,
	.dev = {
		.platform_data	= &tmu2_platform_data,
	},
	.resource	= tmu2_resources,
	.num_resources	= ARRAY_SIZE(tmu2_resources),
};

static struct platform_device *sh7760_devices[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&tmu0_device,
	&tmu1_device,
	&tmu2_device,
};

/* Register all on-chip devices at arch_initcall time. */
static int __init sh7760_devices_setup(void)
{
	return platform_add_devices(sh7760_devices,
				    ARRAY_SIZE(sh7760_devices));
}
arch_initcall(sh7760_devices_setup);

static struct platform_device *sh7760_early_devices[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&tmu0_device,
	&tmu1_device,
	&tmu2_device,
};

/* Make consoles/timers available before the driver model is up. */
void __init plat_early_device_setup(void)
{
	early_platform_add_devices(sh7760_early_devices,
				   ARRAY_SIZE(sh7760_early_devices));
}

#define INTC_ICR	0xffd00000UL
#define INTC_ICR_IRLM	(1 << 7)

/* Switch the IRL pins to individual-IRQ mode when requested. */
void __init plat_irq_setup_pins(int mode)
{
	switch (mode) {
	case IRQ_MODE_IRQ:
		__raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
		register_intc_controller(&intc_desc_irq);
		break;
	default:
		BUG();
	}
}

void __init plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}
gpl-2.0
crazyleen/linux-source-3.2
drivers/ata/pata_marvell.c
7878
4541
/*
 *	Marvell PATA driver.
 *
 *	For the moment we drive the PATA port in legacy mode. That
 *	isn't making full use of the device functionality but it is
 *	easy to get working.
 *
 *	(c) 2006 Red Hat
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>

#define DRV_NAME	"pata_marvell"
#define DRV_VERSION	"0.1.6"

/**
 *	marvell_pata_active	-	check if PATA is active
 *	@pdev: PCI device
 *
 *	Returns 1 if the PATA port may be active. We know how to check this
 *	for the 6145 but not the other devices
 *
 *	NOTE(review): callers treat any non-zero return as "active", so the
 *	-ENOMEM returned when pci_iomap() fails also counts as active --
 *	confirm that fail-open behaviour is intended.
 */
static int marvell_pata_active(struct pci_dev *pdev)
{
	int i;
	u32 devices;
	void __iomem *barp;

	/* We don't yet know how to do this for other devices */
	if (pdev->device != 0x6145)
		return 1;

	barp = pci_iomap(pdev, 5, 0x10);
	if (barp == NULL)
		return -ENOMEM;

	/* Dump BAR5 for debugging; bit 4 of offset 0x0C flags the PATA port */
	printk("BAR5:");
	for(i = 0; i <= 0x0F; i++)
		printk("%02X:%02X ", i, ioread8(barp + i));
	printk("\n");

	devices = ioread32(barp + 0x0C);
	pci_iounmap(pdev, barp);

	if (devices & 0x10)
		return 1;
	return 0;
}

/**
 *	marvell_pre_reset	-	probe begin
 *	@link: link
 *	@deadline: deadline jiffies for the operation
 *
 *	Perform the PATA port setup we need.
 */
static int marvell_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (pdev->device == 0x6145 && ap->port_no == 0 &&
		!marvell_pata_active(pdev))	/* PATA enable ? */
			return -ENOENT;

	return ata_sff_prereset(link, deadline);
}

/* Port 0 is PATA (cable sensed via BMDMA status); port 1 is mapped SATA. */
static int marvell_cable_detect(struct ata_port *ap)
{
	/* Cable type */
	switch(ap->port_no)
	{
	case 0:
		if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
			return ATA_CBL_PATA40;
		return ATA_CBL_PATA80;
	case 1: /* Legacy SATA port */
		return ATA_CBL_SATA;
	}

	BUG();
	return 0;	/* Our BUG macro needs the right markup */
}

/* No PIO or DMA methods needed for this device */

static struct scsi_host_template marvell_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations marvell_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= marvell_cable_detect,
	.prereset		= marvell_pre_reset,
};

/**
 *	marvell_init_one - Register Marvell ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in marvell_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */
static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,

		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,

		.port_ops	= &marvell_ops,
	};
	static const struct ata_port_info info_sata = {
		/* Slave possible as its magically mapped not real */
		.flags		= ATA_FLAG_SLAVE_POSS,

		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,

		.port_ops	= &marvell_ops,
	};
	const struct ata_port_info *ppi[] = { &info, &info_sata };

	/* the 6101 exposes only one (PATA) port */
	if (pdev->device == 0x6101)
		ppi[1] = &ata_dummy_port_info;

#if defined(CONFIG_SATA_AHCI) || defined(CONFIG_SATA_AHCI_MODULE)
	/* If the AHCI driver is built, let it own the chip when PATA is off */
	if (!marvell_pata_active(pdev)) {
		printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
		return -ENODEV;
	}
#endif
	return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
}

static const struct pci_device_id marvell_pci_tbl[] = {
	{ PCI_DEVICE(0x11AB, 0x6101), },
	{ PCI_DEVICE(0x11AB, 0x6121), },
	{ PCI_DEVICE(0x11AB, 0x6123), },
	{ PCI_DEVICE(0x11AB, 0x6145), },
	{ PCI_DEVICE(0x1B4B, 0x91A0), },
	{ PCI_DEVICE(0x1B4B, 0x91A4), },

	{ }	/* terminate list */
};

static struct pci_driver marvell_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= marvell_pci_tbl,
	.probe			= marvell_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
};

static int __init marvell_init(void)
{
	return pci_register_driver(&marvell_pci_driver);
}

static void __exit marvell_exit(void)
{
	pci_unregister_driver(&marvell_pci_driver);
}

module_init(marvell_init);
module_exit(marvell_exit);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell ATA in legacy mode");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, marvell_pci_tbl);
MODULE_VERSION(DRV_VERSION);
gpl-2.0
loxdegio/GT_S7500_LoxKernel_trebon
GT-S7500_Kernel/fs/ecryptfs/messaging.c
8134
17771
/** * eCryptfs: Linux filesystem encryption layer * * Copyright (C) 2004-2008 International Business Machines Corp. * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> * Tyler Hicks <tyhicks@ou.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/user_namespace.h> #include <linux/nsproxy.h> #include "ecryptfs_kernel.h" static LIST_HEAD(ecryptfs_msg_ctx_free_list); static LIST_HEAD(ecryptfs_msg_ctx_alloc_list); static struct mutex ecryptfs_msg_ctx_lists_mux; static struct hlist_head *ecryptfs_daemon_hash; struct mutex ecryptfs_daemon_hash_mux; static int ecryptfs_hash_bits; #define ecryptfs_uid_hash(uid) \ hash_long((unsigned long)uid, ecryptfs_hash_bits) static u32 ecryptfs_msg_counter; static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; /** * ecryptfs_acquire_free_msg_ctx * @msg_ctx: The context that was acquired from the free list * * Acquires a context element from the free list and locks the mutex * on the context. Sets the msg_ctx task to current. Returns zero on * success; non-zero on error or upon failure to acquire a free * context element. Must be called with ecryptfs_msg_ctx_lists_mux * held. 
*/ static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx) { struct list_head *p; int rc; if (list_empty(&ecryptfs_msg_ctx_free_list)) { printk(KERN_WARNING "%s: The eCryptfs free " "context list is empty. It may be helpful to " "specify the ecryptfs_message_buf_len " "parameter to be greater than the current " "value of [%d]\n", __func__, ecryptfs_message_buf_len); rc = -ENOMEM; goto out; } list_for_each(p, &ecryptfs_msg_ctx_free_list) { *msg_ctx = list_entry(p, struct ecryptfs_msg_ctx, node); if (mutex_trylock(&(*msg_ctx)->mux)) { (*msg_ctx)->task = current; rc = 0; goto out; } } rc = -ENOMEM; out: return rc; } /** * ecryptfs_msg_ctx_free_to_alloc * @msg_ctx: The context to move from the free list to the alloc list * * Must be called with ecryptfs_msg_ctx_lists_mux held. */ static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx) { list_move(&msg_ctx->node, &ecryptfs_msg_ctx_alloc_list); msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_PENDING; msg_ctx->counter = ++ecryptfs_msg_counter; } /** * ecryptfs_msg_ctx_alloc_to_free * @msg_ctx: The context to move from the alloc list to the free list * * Must be called with ecryptfs_msg_ctx_lists_mux held. */ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) { list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list); if (msg_ctx->msg) kfree(msg_ctx->msg); msg_ctx->msg = NULL; msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE; } /** * ecryptfs_find_daemon_by_euid * @euid: The effective user id which maps to the desired daemon id * @user_ns: The namespace in which @euid applies * @daemon: If return value is zero, points to the desired daemon pointer * * Must be called with ecryptfs_daemon_hash_mux held. * * Search the hash list for the given user id. * * Returns zero if the user id exists in the list; non-zero otherwise. 
*/ int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, struct user_namespace *user_ns) { struct hlist_node *elem; int rc; hlist_for_each_entry(*daemon, elem, &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)], euid_chain) { if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) { rc = 0; goto out; } } rc = -EINVAL; out: return rc; } /** * ecryptfs_spawn_daemon - Create and initialize a new daemon struct * @daemon: Pointer to set to newly allocated daemon struct * @euid: Effective user id for the daemon * @user_ns: The namespace in which @euid applies * @pid: Process id for the daemon * * Must be called ceremoniously while in possession of * ecryptfs_sacred_daemon_hash_mux * * Returns zero on success; non-zero otherwise */ int ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, struct user_namespace *user_ns, struct pid *pid) { int rc = 0; (*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL); if (!(*daemon)) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of " "GFP_KERNEL memory\n", __func__, sizeof(**daemon)); goto out; } (*daemon)->euid = euid; (*daemon)->user_ns = get_user_ns(user_ns); (*daemon)->pid = get_pid(pid); (*daemon)->task = current; mutex_init(&(*daemon)->mux); INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue); init_waitqueue_head(&(*daemon)->wait); (*daemon)->num_queued_msg_ctx = 0; hlist_add_head(&(*daemon)->euid_chain, &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]); out: return rc; } /** * ecryptfs_exorcise_daemon - Destroy the daemon struct * * Must be called ceremoniously while in possession of * ecryptfs_daemon_hash_mux and the daemon's own mux. 
*/ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) { struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp; int rc = 0; mutex_lock(&daemon->mux); if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ) || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) { rc = -EBUSY; printk(KERN_WARNING "%s: Attempt to destroy daemon with pid " "[0x%p], but it is in the midst of a read or a poll\n", __func__, daemon->pid); mutex_unlock(&daemon->mux); goto out; } list_for_each_entry_safe(msg_ctx, msg_ctx_tmp, &daemon->msg_ctx_out_queue, daemon_out_list) { list_del(&msg_ctx->daemon_out_list); daemon->num_queued_msg_ctx--; printk(KERN_WARNING "%s: Warning: dropping message that is in " "the out queue of a dying daemon\n", __func__); ecryptfs_msg_ctx_alloc_to_free(msg_ctx); } hlist_del(&daemon->euid_chain); if (daemon->task) wake_up_process(daemon->task); if (daemon->pid) put_pid(daemon->pid); if (daemon->user_ns) put_user_ns(daemon->user_ns); mutex_unlock(&daemon->mux); kzfree(daemon); out: return rc; } /** * ecryptfs_process_quit * @euid: The user ID owner of the message * @user_ns: The namespace in which @euid applies * @pid: The process ID for the userspace program that sent the * message * * Deletes the corresponding daemon for the given euid and pid, if * it is the registered that is requesting the deletion. Returns zero * after deleting the desired daemon; non-zero otherwise. 
*/ int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, struct pid *pid) { struct ecryptfs_daemon *daemon; int rc; mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns); if (rc || !daemon) { rc = -EINVAL; printk(KERN_ERR "Received request from user [%d] to " "unregister unrecognized daemon [0x%p]\n", euid, pid); goto out_unlock; } rc = ecryptfs_exorcise_daemon(daemon); out_unlock: mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } /** * ecryptfs_process_reponse * @msg: The ecryptfs message received; the caller should sanity check * msg->data_len and free the memory * @pid: The process ID of the userspace application that sent the * message * @seq: The sequence number of the message; must match the sequence * number for the existing message context waiting for this * response * * Processes a response message after sending an operation request to * userspace. Some other process is awaiting this response. Before * sending out its first communications, the other process allocated a * msg_ctx from the ecryptfs_msg_ctx_arr at a particular index. The * response message contains this index so that we can copy over the * response message into the msg_ctx that the process holds a * reference to. The other process is going to wake up, check to see * that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then * proceed to read off and process the response message. Returns zero * upon delivery to desired context element; non-zero upon delivery * failure or error. 
* * Returns zero on success; non-zero otherwise */ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, struct user_namespace *user_ns, struct pid *pid, u32 seq) { struct ecryptfs_daemon *uninitialized_var(daemon); struct ecryptfs_msg_ctx *msg_ctx; size_t msg_size; struct nsproxy *nsproxy; struct user_namespace *tsk_user_ns; uid_t ctx_euid; int rc; if (msg->index >= ecryptfs_message_buf_len) { rc = -EINVAL; printk(KERN_ERR "%s: Attempt to reference " "context buffer at index [%d]; maximum " "allowable is [%d]\n", __func__, msg->index, (ecryptfs_message_buf_len - 1)); goto out; } msg_ctx = &ecryptfs_msg_ctx_arr[msg->index]; mutex_lock(&msg_ctx->mux); mutex_lock(&ecryptfs_daemon_hash_mux); rcu_read_lock(); nsproxy = task_nsproxy(msg_ctx->task); if (nsproxy == NULL) { rc = -EBADMSG; printk(KERN_ERR "%s: Receiving process is a zombie. Dropping " "message.\n", __func__); rcu_read_unlock(); mutex_unlock(&ecryptfs_daemon_hash_mux); goto wake_up; } tsk_user_ns = __task_cred(msg_ctx->task)->user->user_ns; ctx_euid = task_euid(msg_ctx->task); rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, tsk_user_ns); rcu_read_unlock(); mutex_unlock(&ecryptfs_daemon_hash_mux); if (rc) { rc = -EBADMSG; printk(KERN_WARNING "%s: User [%d] received a " "message response from process [0x%p] but does " "not have a registered daemon\n", __func__, ctx_euid, pid); goto wake_up; } if (ctx_euid != euid) { rc = -EBADMSG; printk(KERN_WARNING "%s: Received message from user " "[%d]; expected message from user [%d]\n", __func__, euid, ctx_euid); goto unlock; } if (tsk_user_ns != user_ns) { rc = -EBADMSG; printk(KERN_WARNING "%s: Received message from user_ns " "[0x%p]; expected message from user_ns [0x%p]\n", __func__, user_ns, tsk_user_ns); goto unlock; } if (daemon->pid != pid) { rc = -EBADMSG; printk(KERN_ERR "%s: User [%d] sent a message response " "from an unrecognized process [0x%p]\n", __func__, ctx_euid, pid); goto unlock; } if (msg_ctx->state != 
ECRYPTFS_MSG_CTX_STATE_PENDING) { rc = -EINVAL; printk(KERN_WARNING "%s: Desired context element is not " "pending a response\n", __func__); goto unlock; } else if (msg_ctx->counter != seq) { rc = -EINVAL; printk(KERN_WARNING "%s: Invalid message sequence; " "expected [%d]; received [%d]\n", __func__, msg_ctx->counter, seq); goto unlock; } msg_size = (sizeof(*msg) + msg->data_len); msg_ctx->msg = kmalloc(msg_size, GFP_KERNEL); if (!msg_ctx->msg) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate [%zd] bytes of " "GFP_KERNEL memory\n", __func__, msg_size); goto unlock; } memcpy(msg_ctx->msg, msg, msg_size); msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE; rc = 0; wake_up: wake_up_process(msg_ctx->task); unlock: mutex_unlock(&msg_ctx->mux); out: return rc; } /** * ecryptfs_send_message_locked * @data: The data to send * @data_len: The length of data * @msg_ctx: The message context allocated for the send * * Must be called with ecryptfs_daemon_hash_mux held. * * Returns zero on success; non-zero otherwise */ static int ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx) { struct ecryptfs_daemon *daemon; uid_t euid = current_euid(); int rc; rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); if (rc || !daemon) { rc = -ENOTCONN; printk(KERN_ERR "%s: User [%d] does not have a daemon " "registered\n", __func__, euid); goto out; } mutex_lock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_acquire_free_msg_ctx(msg_ctx); if (rc) { mutex_unlock(&ecryptfs_msg_ctx_lists_mux); printk(KERN_WARNING "%s: Could not claim a free " "context element\n", __func__); goto out; } ecryptfs_msg_ctx_free_to_alloc(*msg_ctx); mutex_unlock(&(*msg_ctx)->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type, 0, daemon); if (rc) printk(KERN_ERR "%s: Error attempting to send message to " "userspace daemon; rc = [%d]\n", __func__, rc); out: return rc; } /** * 
ecryptfs_send_message * @data: The data to send * @data_len: The length of data * @msg_ctx: The message context allocated for the send * * Grabs ecryptfs_daemon_hash_mux. * * Returns zero on success; non-zero otherwise */ int ecryptfs_send_message(char *data, int data_len, struct ecryptfs_msg_ctx **msg_ctx) { int rc; mutex_lock(&ecryptfs_daemon_hash_mux); rc = ecryptfs_send_message_locked(data, data_len, ECRYPTFS_MSG_REQUEST, msg_ctx); mutex_unlock(&ecryptfs_daemon_hash_mux); return rc; } /** * ecryptfs_wait_for_response * @msg_ctx: The context that was assigned when sending a message * @msg: The incoming message from userspace; not set if rc != 0 * * Sleeps until awaken by ecryptfs_receive_message or until the amount * of time exceeds ecryptfs_message_wait_timeout. If zero is * returned, msg will point to a valid message from userspace; a * non-zero value is returned upon failure to receive a message or an * error occurs. Callee must free @msg on success. */ int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, struct ecryptfs_message **msg) { signed long timeout = ecryptfs_message_wait_timeout * HZ; int rc = 0; sleep: timeout = schedule_timeout_interruptible(timeout); mutex_lock(&ecryptfs_msg_ctx_lists_mux); mutex_lock(&msg_ctx->mux); if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_DONE) { if (timeout) { mutex_unlock(&msg_ctx->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); goto sleep; } rc = -ENOMSG; } else { *msg = msg_ctx->msg; msg_ctx->msg = NULL; } ecryptfs_msg_ctx_alloc_to_free(msg_ctx); mutex_unlock(&msg_ctx->mux); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); return rc; } int __init ecryptfs_init_messaging(void) { int i; int rc = 0; if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) { ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS; printk(KERN_WARNING "%s: Specified number of users is " "too large, defaulting to [%d] users\n", __func__, ecryptfs_number_of_users); } mutex_init(&ecryptfs_daemon_hash_mux); mutex_lock(&ecryptfs_daemon_hash_mux); 
ecryptfs_hash_bits = 1; while (ecryptfs_number_of_users >> ecryptfs_hash_bits) ecryptfs_hash_bits++; ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head) * (1 << ecryptfs_hash_bits)), GFP_KERNEL); if (!ecryptfs_daemon_hash) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); mutex_unlock(&ecryptfs_daemon_hash_mux); goto out; } for (i = 0; i < (1 << ecryptfs_hash_bits); i++) INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]); mutex_unlock(&ecryptfs_daemon_hash_mux); ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx) * ecryptfs_message_buf_len), GFP_KERNEL); if (!ecryptfs_msg_ctx_arr) { rc = -ENOMEM; printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); goto out; } mutex_init(&ecryptfs_msg_ctx_lists_mux); mutex_lock(&ecryptfs_msg_ctx_lists_mux); ecryptfs_msg_counter = 0; for (i = 0; i < ecryptfs_message_buf_len; i++) { INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node); INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list); mutex_init(&ecryptfs_msg_ctx_arr[i].mux); mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); ecryptfs_msg_ctx_arr[i].index = i; ecryptfs_msg_ctx_arr[i].state = ECRYPTFS_MSG_CTX_STATE_FREE; ecryptfs_msg_ctx_arr[i].counter = 0; ecryptfs_msg_ctx_arr[i].task = NULL; ecryptfs_msg_ctx_arr[i].msg = NULL; list_add_tail(&ecryptfs_msg_ctx_arr[i].node, &ecryptfs_msg_ctx_free_list); mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux); } mutex_unlock(&ecryptfs_msg_ctx_lists_mux); rc = ecryptfs_init_ecryptfs_miscdev(); if (rc) ecryptfs_release_messaging(); out: return rc; } void ecryptfs_release_messaging(void) { if (ecryptfs_msg_ctx_arr) { int i; mutex_lock(&ecryptfs_msg_ctx_lists_mux); for (i = 0; i < ecryptfs_message_buf_len; i++) { mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); if (ecryptfs_msg_ctx_arr[i].msg) kfree(ecryptfs_msg_ctx_arr[i].msg); mutex_unlock(&ecryptfs_msg_ctx_arr[i].mux); } kfree(ecryptfs_msg_ctx_arr); mutex_unlock(&ecryptfs_msg_ctx_lists_mux); } if (ecryptfs_daemon_hash) { struct hlist_node *elem; struct ecryptfs_daemon 
*daemon; int i; mutex_lock(&ecryptfs_daemon_hash_mux); for (i = 0; i < (1 << ecryptfs_hash_bits); i++) { int rc; hlist_for_each_entry(daemon, elem, &ecryptfs_daemon_hash[i], euid_chain) { rc = ecryptfs_exorcise_daemon(daemon); if (rc) printk(KERN_ERR "%s: Error whilst " "attempting to destroy daemon; " "rc = [%d]. Dazed and confused, " "but trying to continue.\n", __func__, rc); } } kfree(ecryptfs_daemon_hash); mutex_unlock(&ecryptfs_daemon_hash_mux); } ecryptfs_destroy_ecryptfs_miscdev(); return; }
gpl-2.0
Nokius/android_kernel_oppo_n1
arch/x86/mm/pat_rbtree.c
8902
6025
/* * Handle caching attributes in page tables (PAT) * * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * Suresh B Siddha <suresh.b.siddha@intel.com> * * Interval tree (augmented rbtree) used to store the PAT memory type * reservations. */ #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/gfp.h> #include <asm/pgtable.h> #include <asm/pat.h> #include "pat_internal.h" /* * The memtype tree keeps track of memory type for specific * physical memory areas. Without proper tracking, conflicting memory * types in different mappings can cause CPU cache corruption. * * The tree is an interval tree (augmented rbtree) with tree ordered * on starting address. Tree can contain multiple entries for * different regions which overlap. All the aliases have the same * cache attributes of course. * * memtype_lock protects the rbtree. */ static struct rb_root memtype_rbroot = RB_ROOT; static int is_node_overlap(struct memtype *node, u64 start, u64 end) { if (node->start >= end || node->end <= start) return 0; return 1; } static u64 get_subtree_max_end(struct rb_node *node) { u64 ret = 0; if (node) { struct memtype *data = container_of(node, struct memtype, rb); ret = data->subtree_max_end; } return ret; } /* Update 'subtree_max_end' for a node, based on node and its children */ static void memtype_rb_augment_cb(struct rb_node *node, void *__unused) { struct memtype *data; u64 max_end, child_max_end; if (!node) return; data = container_of(node, struct memtype, rb); max_end = data->end; child_max_end = get_subtree_max_end(node->rb_right); if (child_max_end > max_end) max_end = child_max_end; child_max_end = get_subtree_max_end(node->rb_left); if (child_max_end > max_end) max_end = child_max_end; data->subtree_max_end = max_end; } /* Find the first (lowest start addr) overlapping range from rb tree */ static struct memtype *memtype_rb_lowest_match(struct 
rb_root *root, u64 start, u64 end) { struct rb_node *node = root->rb_node; struct memtype *last_lower = NULL; while (node) { struct memtype *data = container_of(node, struct memtype, rb); if (get_subtree_max_end(node->rb_left) > start) { /* Lowest overlap if any must be on left side */ node = node->rb_left; } else if (is_node_overlap(data, start, end)) { last_lower = data; break; } else if (start >= data->start) { /* Lowest overlap if any must be on right side */ node = node->rb_right; } else { break; } } return last_lower; /* Returns NULL if there is no overlap */ } static struct memtype *memtype_rb_exact_match(struct rb_root *root, u64 start, u64 end) { struct memtype *match; match = memtype_rb_lowest_match(root, start, end); while (match != NULL && match->start < end) { struct rb_node *node; if (match->start == start && match->end == end) return match; node = rb_next(&match->rb); if (node) match = container_of(node, struct memtype, rb); else match = NULL; } return NULL; /* Returns NULL if there is no exact match */ } static int memtype_rb_check_conflict(struct rb_root *root, u64 start, u64 end, unsigned long reqtype, unsigned long *newtype) { struct rb_node *node; struct memtype *match; int found_type = reqtype; match = memtype_rb_lowest_match(&memtype_rbroot, start, end); if (match == NULL) goto success; if (match->type != found_type && newtype == NULL) goto failure; dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end); found_type = match->type; node = rb_next(&match->rb); while (node) { match = container_of(node, struct memtype, rb); if (match->start >= end) /* Checked all possible matches */ goto success; if (is_node_overlap(match, start, end) && match->type != found_type) { goto failure; } node = rb_next(&match->rb); } success: if (newtype) *newtype = found_type; return 0; failure: printk(KERN_INFO "%s:%d conflicting memory types " "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start, end, cattr_name(found_type), cattr_name(match->type)); return 
-EBUSY; } static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata) { struct rb_node **node = &(root->rb_node); struct rb_node *parent = NULL; while (*node) { struct memtype *data = container_of(*node, struct memtype, rb); parent = *node; if (newdata->start <= data->start) node = &((*node)->rb_left); else if (newdata->start > data->start) node = &((*node)->rb_right); } rb_link_node(&newdata->rb, parent, node); rb_insert_color(&newdata->rb, root); rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL); } int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) { int err = 0; err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end, new->type, ret_type); if (!err) { if (ret_type) new->type = *ret_type; new->subtree_max_end = new->end; memtype_rb_insert(&memtype_rbroot, new); } return err; } struct memtype *rbt_memtype_erase(u64 start, u64 end) { struct rb_node *deepest; struct memtype *data; data = memtype_rb_exact_match(&memtype_rbroot, start, end); if (!data) goto out; deepest = rb_augment_erase_begin(&data->rb); rb_erase(&data->rb, &memtype_rbroot); rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL); out: return data; } struct memtype *rbt_memtype_lookup(u64 addr) { struct memtype *data; data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE); return data; } #if defined(CONFIG_DEBUG_FS) int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos) { struct rb_node *node; int i = 1; node = rb_first(&memtype_rbroot); while (node && pos != i) { node = rb_next(node); i++; } if (node) { /* pos == i */ struct memtype *this = container_of(node, struct memtype, rb); *out = *this; return 0; } else { return 1; } } #endif
gpl-2.0
googyanas/Googy-Max-Kernel
drivers/net/wireless/rt2x00/rt2x00leds.c
9158
6602
/* Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> <http://rt2x00.serialmonkey.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* Module: rt2x00lib Abstract: rt2x00 led specific routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) { struct rt2x00_led *led = &rt2x00dev->led_qual; unsigned int brightness; if ((led->type != LED_TYPE_QUALITY) || !(led->flags & LED_REGISTERED)) return; /* * Led handling requires a positive value for the rssi, * to do that correctly we need to add the correction. */ rssi += rt2x00dev->rssi_offset; /* * Get the rssi level, this is used to convert the rssi * to a LED value inside the range LED_OFF - LED_FULL. */ if (rssi <= 30) rssi = 0; else if (rssi <= 39) rssi = 1; else if (rssi <= 49) rssi = 2; else if (rssi <= 53) rssi = 3; else if (rssi <= 63) rssi = 4; else rssi = 5; /* * Note that we must _not_ send LED_OFF since the driver * is going to calculate the value and might use it in a * division. 
*/ brightness = ((LED_FULL / 6) * rssi) + 1; if (brightness != led->led_dev.brightness) { led->led_dev.brightness_set(&led->led_dev, brightness); led->led_dev.brightness = brightness; } } static void rt2x00led_led_simple(struct rt2x00_led *led, bool enabled) { unsigned int brightness = enabled ? LED_FULL : LED_OFF; if (!(led->flags & LED_REGISTERED)) return; led->led_dev.brightness_set(&led->led_dev, brightness); led->led_dev.brightness = brightness; } void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_qual.type == LED_TYPE_ACTIVITY) rt2x00led_led_simple(&rt2x00dev->led_qual, enabled); } void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_assoc.type == LED_TYPE_ASSOC) rt2x00led_led_simple(&rt2x00dev->led_assoc, enabled); } void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_radio.type == LED_TYPE_RADIO) rt2x00led_led_simple(&rt2x00dev->led_radio, enabled); } static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, const char *name) { struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); int retval; led->led_dev.name = name; led->led_dev.brightness = LED_OFF; retval = led_classdev_register(device, &led->led_dev); if (retval) { ERROR(rt2x00dev, "Failed to register led handler.\n"); return retval; } led->flags |= LED_REGISTERED; return 0; } void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) { char dev_name[16]; char name[32]; int retval; unsigned long on_period; unsigned long off_period; snprintf(dev_name, sizeof(dev_name), "%s-%s", rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy)); if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s::radio", dev_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_radio, name); if (retval) goto exit_fail; } if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s::assoc", dev_name); retval = 
rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_assoc, name); if (retval) goto exit_fail; } if (rt2x00dev->led_qual.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s::quality", dev_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_qual, name); if (retval) goto exit_fail; } /* * Initialize blink time to default value: * On period: 70ms * Off period: 30ms */ if (rt2x00dev->led_radio.led_dev.blink_set) { on_period = 70; off_period = 30; rt2x00dev->led_radio.led_dev.blink_set( &rt2x00dev->led_radio.led_dev, &on_period, &off_period); } return; exit_fail: rt2x00leds_unregister(rt2x00dev); } static void rt2x00leds_unregister_led(struct rt2x00_led *led) { led_classdev_unregister(&led->led_dev); /* * This might look weird, but when we are unregistering while * suspended the led is already off, and since we haven't * fully resumed yet, access to the device might not be * possible yet. */ if (!(led->led_dev.flags & LED_SUSPENDED)) led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->flags &= ~LED_REGISTERED; } void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_qual); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_radio); } static inline void rt2x00leds_suspend_led(struct rt2x00_led *led) { led_classdev_suspend(&led->led_dev); /* This shouldn't be needed, but just to be safe */ led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->led_dev.brightness = LED_OFF; } void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_qual); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_radio); } 
static inline void rt2x00leds_resume_led(struct rt2x00_led *led) { led_classdev_resume(&led->led_dev); /* Device might have enabled the LEDS during resume */ led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->led_dev.brightness = LED_OFF; } void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_radio); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_qual); }
gpl-2.0
BOOTMGR/GT-I9070_kernel
fs/hfs/bitmap.c
10182
5969
/*
 * linux/fs/hfs/bitmap.c
 *
 * Copyright (C) 1996-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * Based on GPLed code Copyright (C) 1995  Michael Dreher
 *
 * This file contains the code to modify the volume bitmap:
 * search/set/clear bits.
 */

#include "hfs_fs.h"

/*
 * hfs_find_set_zero_bits()
 *
 * Description:
 *   Given a block of memory, its length in bits, and a starting bit number,
 *   determine the number of the first zero bits (in left-to-right ordering)
 *   in that range.
 *
 *   Returns >= 'size' if no zero bits are found in the range.
 *
 *   Accesses memory in 32-bit aligned chunks of 32-bits and thus
 *   may read beyond the 'size'th bit.
 *
 *   On entry *max is the maximum number of bits to claim; on success the
 *   found bits are SET in the bitmap (this both finds and allocates), and
 *   *max is updated to the number of bits actually set, which may be less
 *   than requested if the free run is shorter.  Bits are stored big-endian,
 *   MSB-first within each 32-bit word.
 */
static u32 hfs_find_set_zero_bits(__be32 *bitmap, u32 size, u32 offset, u32 *max)
{
	__be32 *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	curr = bitmap + (offset / 32);
	end = bitmap + ((size + 31) / 32);

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		i = offset % 32;
		/* MSB-first: bit 0 of the word is mask (1U << 31) */
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}

	/* scan complete u32s for the first zero bit */
	while (++curr < end) {
		val = *curr;
		if (~val) {
			n = be32_to_cpu(val);
			mask = 1 << 31;
			for (i = 0; i < 32; mask >>= 1, i++) {
				if (!(n & mask))
					goto found;
			}
		}
	}
	return size;

found:
	start = (curr - bitmap) * 32 + i;
	if (start >= size)
		return start;
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		/* stop early if we run out of requested bits or hit a set bit */
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		n = be32_to_cpu(*curr);
		if (len < 32)
			break;
		if (n) {
			/* word already has set bits: fall through to the
			 * partial-word loop, claiming at most this word */
			len = 32;
			break;
		}
		*curr++ = cpu_to_be32(0xffffffff);
		len -= 32;
	}
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	*max = (curr - bitmap) * 32 + i - start;
	return start;
}

/*
 * hfs_vbm_search_free()
 *
 * Description:
 *   Search for 'num_bits' consecutive cleared bits in the bitmap blocks of
 *   the hfs MDB. 'mdb' had better be locked or the returned range
 *   may be no longer free, when this functions returns!
 *   XXX Currently the search starts from bit 0, but it should start with
 *   the bit number stored in 's_alloc_ptr' of the MDB.
 * Input Variable(s):
 *   struct hfs_mdb *mdb: Pointer to the hfs MDB
 *   u16 *num_bits: Pointer to the number of cleared bits
 *     to search for
 * Output Variable(s):
 *   u16 *num_bits: The number of consecutive clear bits of the
 *     returned range. If the bitmap is fragmented, this will be less than
 *     requested and it will be zero, when the disk is full.
 * Returns:
 *   The number of the first bit of the range of cleared bits which has been
 *   found. When 'num_bits' is zero, this is invalid!
 * Preconditions:
 *   'mdb' points to a "valid" (struct hfs_mdb).
 *   'num_bits' points to a variable of type (u16), which contains
 *     the number of cleared bits to find.
 * Postconditions:
 *   'num_bits' is set to the length of the found sequence.
 */
u32 hfs_vbm_search_free(struct super_block *sb, u32 goal, u32 *num_bits)
{
	void *bitmap;
	u32 pos;

	/* make sure we have actual work to perform */
	if (!*num_bits)
		return 0;

	mutex_lock(&HFS_SB(sb)->bitmap_lock);
	bitmap = HFS_SB(sb)->bitmap;

	/* search forward from 'goal'; on failure wrap and retry from 0
	 * up to 'goal' */
	pos = hfs_find_set_zero_bits(bitmap, HFS_SB(sb)->fs_ablocks, goal, num_bits);
	if (pos >= HFS_SB(sb)->fs_ablocks) {
		if (goal)
			pos = hfs_find_set_zero_bits(bitmap, goal, 0, num_bits);
		if (pos >= HFS_SB(sb)->fs_ablocks) {
			*num_bits = pos = 0;
			goto out;
		}
	}

	dprint(DBG_BITMAP, "alloc_bits: %u,%u\n", pos, *num_bits);
	HFS_SB(sb)->free_ablocks -= *num_bits;
	hfs_bitmap_dirty(sb);
out:
	mutex_unlock(&HFS_SB(sb)->bitmap_lock);
	return pos;
}

/*
 * hfs_clear_vbm_bits()
 *
 * Description:
 *   Clear the requested bits in the volume bitmap of the hfs filesystem
 * Input Variable(s):
 *   struct hfs_mdb *mdb: Pointer to the hfs MDB
 *   u16 start: The offset of the first bit
 *   u16 count: The number of bits
 * Output Variable(s):
 *   None
 * Returns:
 *    0: no error
 *   -1: One of the bits was already clear.  This is a strange
 *	 error and when it happens, the filesystem must be repaired!
 *   -2: One or more of the bits are out of range of the bitmap.
 * Preconditions:
 *   'mdb' points to a "valid" (struct hfs_mdb).
 * Postconditions:
 *   Starting with bit number 'start', 'count' bits in the volume bitmap
 *   are cleared. The affected bitmap blocks are marked "dirty", the free
 *   block count of the MDB is updated and the MDB is marked dirty.
 */
int hfs_clear_vbm_bits(struct super_block *sb, u16 start, u16 count)
{
	__be32 *curr;
	u32 mask;
	int i, len;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "clear_bits: %u,%u\n", start, count);
	/* are all of the bits in range? */
	if ((start + count) > HFS_SB(sb)->fs_ablocks)
		return -2;

	mutex_lock(&HFS_SB(sb)->bitmap_lock);
	/* bitmap is always on a 32-bit boundary */
	curr = HFS_SB(sb)->bitmap + (start / 32);
	len = count;

	/* do any partial u32 at the start */
	i = start % 32;
	if (i) {
		int j = 32 - i;
		/* keep the first i bits, clear the rest of the word */
		mask = 0xffffffffU << j;
		if (j > count) {
			/* range lies entirely within this word: also keep
			 * the bits after start+count */
			mask |= 0xffffffffU >> (i + count);
			*curr &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (count >= 32) {
		*curr++ = 0;
		count -= 32;
	}
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	HFS_SB(sb)->free_ablocks += len;
	mutex_unlock(&HFS_SB(sb)->bitmap_lock);
	hfs_bitmap_dirty(sb);
	return 0;
}
gpl-2.0
evilwombat/gopro-linux
drivers/media/dvb/frontends/dibx000_common.c
199
4809
#include <linux/i2c.h>

#include "dibx000_common.h"

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");

#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)

/*
 * Write a 16-bit value to a 16-bit register of the demodulator over I2C.
 * Returns 0 on success, -EREMOTEIO if the single-message transfer failed.
 */
static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
{
	u8 b[4] = {
		(reg >> 8) & 0xff, reg & 0xff,
		(val >> 8) & 0xff, val & 0xff,
	};
	struct i2c_msg msg = {
		.addr = mst->i2c_addr,.flags = 0,.buf = b,.len = 4
	};
	return i2c_transfer(mst->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
}

/*
 * Select which internal I2C interface of the demodulator is routed through
 * the gate.  Only writes to the hardware when the interface actually changes
 * and the device revision supports interface selection (rev > DIB3000MC).
 */
static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
					enum dibx000_i2c_interface intf)
{
	if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
		dprintk("selecting interface: %d", intf);
		mst->selected_interface = intf;
		return dibx000_write_word(mst, mst->base_reg + 4, intf);
	}
	return 0;
}

/*
 * Build (into tx[4]) the register write that opens or closes the I2C gate
 * for slave 'addr'.  Does not perform any bus transfer itself; callers send
 * the prepared buffer.  Always returns 0.
 */
static int dibx000_i2c_gate_ctrl(struct dibx000_i2c_master *mst, u8 tx[4],
				 u8 addr, int onoff)
{
	u16 val;

	if (onoff)
		val = addr << 8;	// bit 7 = use master or not, if 0, the gate is open
	else
		val = 1 << 7;

	if (mst->device_rev > DIB7000)
		val <<= 1;

	tx[0] = (((mst->base_reg + 1) >> 8) & 0xff);
	tx[1] = ((mst->base_reg + 1) & 0xff);
	tx[2] = val >> 8;
	tx[3] = val & 0xff;

	return 0;
}

/* Advertise plain I2C capability for the gated adapter. */
static u32 dibx000_i2c_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C;
}

/*
 * master_xfer for the gated tuner bus: wraps the caller's messages between a
 * "gate open" write (addressed to msg[0].addr) and a "gate close" write, and
 * sends all of them in one atomic i2c_transfer.
 *
 * NOTE(review): 'm[2 + num]' is a variable-length array on the kernel stack
 * sized by the caller-supplied message count — large transfers could overflow
 * the stack; consider bounding num or using a fixed-size array.
 */
static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
					struct i2c_msg msg[], int num)
{
	struct dibx000_i2c_master *mst = i2c_get_adapdata(i2c_adap);
	struct i2c_msg m[2 + num];
	u8 tx_open[4], tx_close[4];

	memset(m, 0, sizeof(struct i2c_msg) * (2 + num));

	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);

	/* message 0: open the gate for the tuner's address */
	dibx000_i2c_gate_ctrl(mst, tx_open, msg[0].addr, 1);
	m[0].addr = mst->i2c_addr;
	m[0].buf = tx_open;
	m[0].len = 4;

	/* messages 1..num: the caller's original transfer */
	memcpy(&m[1], msg, sizeof(struct i2c_msg) * num);

	/* final message: close the gate again */
	dibx000_i2c_gate_ctrl(mst, tx_close, 0, 0);
	m[num + 1].addr = mst->i2c_addr;
	m[num + 1].buf = tx_close;
	m[num + 1].len = 4;

	return i2c_transfer(mst->i2c_adap, m, 2 + num) == 2 + num ? num : -EIO;
}

static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
	.master_xfer = dibx000_i2c_gated_tuner_xfer,
	.functionality = dibx000_i2c_func,
};

/*
 * Return the i2c_adapter for the requested interface, or NULL if the
 * combination is not supported (only the gated tuner bus exists here).
 */
struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
					    enum dibx000_i2c_interface intf,
					    int gating)
{
	struct i2c_adapter *i2c = NULL;

	switch (intf) {
	case DIBX000_I2C_INTERFACE_TUNER:
		if (gating)
			i2c = &mst->gated_tuner_i2c_adap;
		break;
	default:
		printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n");
		break;
	}

	return i2c;
}
EXPORT_SYMBOL(dibx000_get_i2c_adapter);

/*
 * Reset the I2C master state: close the gate on the wire, invalidate the
 * cached interface selection, and re-select the tuner interface.
 */
void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst)
{
	/* initialize the i2c-master by closing the gate */
	u8 tx[4];
	struct i2c_msg m = {.addr = mst->i2c_addr,.buf = tx,.len = 4 };

	dibx000_i2c_gate_ctrl(mst, tx, 0, 0);
	i2c_transfer(mst->i2c_adap, &m, 1);

	mst->selected_interface = 0xff;	// the first time force a select of the I2C
	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
}
EXPORT_SYMBOL(dibx000_reset_i2c_master);

/*
 * Register one virtual i2c_adapter backed by 'mst'.
 * Returns 0 on success, -ENODEV if i2c_add_adapter() failed.
 *
 * NOTE(review): strncpy() does not guarantee NUL termination if 'name' is
 * as long as the destination — the names used here are short, but verify.
 */
static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
			    struct i2c_algorithm *algo, const char *name,
			    struct dibx000_i2c_master *mst)
{
	strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
	i2c_adap->algo = algo;
	i2c_adap->algo_data = NULL;
	i2c_set_adapdata(i2c_adap, mst);
	if (i2c_add_adapter(i2c_adap) < 0)
		return -ENODEV;
	return 0;
}

/*
 * Initialize the I2C master: record device revision/address, pick the
 * register base for the revision, register the gated tuner adapter, and
 * close the gate once on the wire.
 * Returns 1 if the initial gate-close transfer succeeded, 0 otherwise
 * (non-standard boolean convention; callers must not treat it as errno).
 */
int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
			    struct i2c_adapter *i2c_adap, u8 i2c_addr)
{
	u8 tx[4];
	struct i2c_msg m = {.addr = i2c_addr >> 1,.buf = tx,.len = 4 };

	mst->device_rev = device_rev;
	mst->i2c_adap = i2c_adap;
	mst->i2c_addr = i2c_addr >> 1;

	/* newer parts map the i2c-master block at a different register base */
	if (device_rev == DIB7000P || device_rev == DIB8000)
		mst->base_reg = 1024;
	else
		mst->base_reg = 768;

	if (i2c_adapter_init
	    (&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
	     "DiBX000 tuner I2C bus", mst) != 0)
		printk(KERN_ERR
		       "DiBX000: could not initialize the tuner i2c_adapter\n");

	/* initialize the i2c-master by closing the gate */
	dibx000_i2c_gate_ctrl(mst, tx, 0, 0);

	return i2c_transfer(i2c_adap, &m, 1) == 1;
}
EXPORT_SYMBOL(dibx000_init_i2c_master);

/* Unregister the gated tuner adapter created in dibx000_init_i2c_master(). */
void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
{
	i2c_del_adapter(&mst->gated_tuner_i2c_adap);
}
EXPORT_SYMBOL(dibx000_exit_i2c_master);

/*
 * Return the current kernel time scaled to units of 100 microseconds
 * (tv_sec * 10000 + tv_nsec / 100000).
 */
u32 systime(void)
{
	struct timespec t;

	t = current_kernel_time();

	return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
}
EXPORT_SYMBOL(systime);

MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
MODULE_LICENSE("GPL");
gpl-2.0
placiano/NBKernel_NK4
drivers/usb/host/xhci-plat.c
455
6449
/*
 * xhci-plat.c - xHCI host controller driver platform Bus Glue.
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
 * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * A lot of code borrowed from the Linux xHCI driver.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "xhci.h"

#define SYNOPSIS_DWC3_VENDOR	0x5533

/*
 * Apply platform-specific quirk flags to the xHCI controller based on the
 * vendor/revision supplied via platform data.  Always sets XHCI_PLAT; the
 * remaining quirks are revision-gated workarounds for Synopsys DWC3 cores.
 */
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
	struct xhci_plat_data *pdata = dev->platform_data;

	/*
	 * As of now platform drivers don't provide MSI support so we ensure
	 * here that the generic code does not try to make a pci_dev from our
	 * dev struct in order to setup MSI
	 */
	xhci->quirks |= XHCI_PLAT;

	if (!pdata)
		return;
	if (pdata->vendor == SYNOPSIS_DWC3_VENDOR && pdata->revision < 0x230A)
		xhci->quirks |= XHCI_PORTSC_DELAY;
	if (pdata->vendor == SYNOPSIS_DWC3_VENDOR && pdata->revision <= 0x250A)
		xhci->quirks |= XHCI_TR_DEQ_ERR_QUIRK;
	if (pdata->vendor == SYNOPSIS_DWC3_VENDOR && pdata->revision == 0x250A)
		xhci->quirks |= XHCI_RESET_DELAY;
	if (pdata->vendor == SYNOPSIS_DWC3_VENDOR && pdata->revision <= 0x230A)
		xhci->quirks |= XHCI_RESET_RS_ON_RESUME_QUIRK;
}

/* called during probe() after chip reset completes */
static int xhci_plat_setup(struct usb_hcd *hcd)
{
	return xhci_gen_setup(hcd, xhci_plat_quirks);
}

static const struct hc_driver xhci_plat_xhci_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd *),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset =		xhci_plat_setup,
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/* Root hub support */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
};

/*
 * Probe: map the controller's MMIO region, enable runtime PM, and register
 * first the USB2 root hub HCD, then the shared USB3 HCD.  Error paths unwind
 * in strict reverse order of acquisition via the goto ladder at the bottom.
 */
static int xhci_plat_probe(struct platform_device *pdev)
{
	const struct hc_driver	*driver;
	struct xhci_hcd		*xhci;
	struct resource         *res;
	struct usb_hcd		*hcd;
	int			ret;
	int			irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	/* let this bus skip generic resume handling (platform-managed PM) */
	hcd_to_bus(hcd)->skip_resume = true;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		ret = -EBUSY;
		goto put_hcd;
	}

	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto release_mem_region;
	}

	/* make sure the parent (e.g. the DWC3 core) is powered first */
	if (pdev->dev.parent)
		pm_runtime_resume(pdev->dev.parent);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto unmap_registers;

	/* USB 2.0 roothub is stored in the platform_device now. */
	hcd = dev_get_drvdata(&pdev->dev);
	xhci = hcd_to_xhci(hcd);
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;

	/*
	 * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
	 * is called by usb_add_hcd().
	 */
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	/* balance the pm_runtime_get_sync() above; device may now suspend */
	pm_runtime_put(&pdev->dev);

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

unmap_registers:
	iounmap(hcd->regs);

release_mem_region:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}

/*
 * Remove: tear down in reverse order of probe — USB3 HCD first, then the
 * USB2 HCD, MMIO mapping, memory region, and finally the xhci struct.
 */
static int xhci_plat_remove(struct platform_device *dev)
{
	struct usb_hcd	*hcd = platform_get_drvdata(dev);
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);

	pm_runtime_disable(&dev->dev);

	usb_remove_hcd(xhci->shared_hcd);
	usb_put_hcd(xhci->shared_hcd);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
	kfree(xhci);

	return 0;
}

#ifdef CONFIG_PM_RUNTIME
/* Runtime-PM suspend: delegate to the core xhci_suspend() if initialized. */
static int xhci_plat_runtime_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!xhci)
		return 0;

	dev_dbg(dev, "xhci-plat runtime suspend\n");

	return xhci_suspend(xhci);
}

/* Runtime-PM resume: delegate to xhci_resume() (not a power-lost resume). */
static int xhci_plat_runtime_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!xhci)
		return 0;

	dev_dbg(dev, "xhci-plat runtime resume\n");

	return xhci_resume(xhci, false);
}
#endif

static const struct dev_pm_ops xhci_plat_pm_ops = {
	SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
			   NULL)
};

static struct platform_driver usb_xhci_driver = {
	.probe	= xhci_plat_probe,
	.remove	= xhci_plat_remove,
	.driver	= {
		.name = "xhci-hcd",
		.pm = &xhci_plat_pm_ops,
	},
};
MODULE_ALIAS("platform:xhci-hcd");

int xhci_register_plat(void)
{
	return platform_driver_register(&usb_xhci_driver);
}

void xhci_unregister_plat(void)
{
	platform_driver_unregister(&usb_xhci_driver);
}
gpl-2.0
facchinm/kernel-brain
fs/xfs/xfs_itable.c
2247
20065
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * Return true for filesystem-internal metadata inodes (realtime bitmap,
 * realtime summary, and quota inodes) that must not be exposed to bulkstat.
 */
STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
		 (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino)));
}

/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 *
 * Grabs the inode via xfs_iget() with ILOCK_SHARED, copies the on-core
 * dinode fields into a heap-allocated xfs_bstat, drops the lock/reference,
 * and then hands the buffer to 'formatter' for the copy-out.  *stat reports
 * BULKSTAT_RV_DIDONE on success, BULKSTAT_RV_NOTHING otherwise.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return XFS_ERROR(EINVAL);

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return XFS_ERROR(ENOMEM);

	/* UNTRUSTED: the inode number came from userspace; DONTCACHE: don't
	 * pollute the inode cache with a full-filesystem scan */
	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error) {
		*stat = BULKSTAT_RV_NOTHING;
		goto out_free;
	}

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	/* device/blocksize/blocks depend on the data fork format */
	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);

	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}

/* Return 0 on success or positive error */
STATIC int
xfs_bulkstat_one_fmt(
	void			__user *ubuffer,	/* buffer to write to */
	int			ubsize,			/* remaining buffer space */
	int			*ubused,		/* bytes consumed */
	const xfs_bstat_t	*buffer)		/* stat record to copy */
{
	if (ubsize < sizeof(*buffer))
		return XFS_ERROR(ENOMEM);
	if (copy_to_user(ubuffer, buffer, sizeof(*buffer)))
		return XFS_ERROR(EFAULT);
	if (ubused)
		*ubused = sizeof(*buffer);
	return 0;
}

/* Convenience wrapper binding the default user-copy formatter. */
int
xfs_bulkstat_one(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* buffer to place output in */
	int		ubsize,		/* size of buffer */
	int		*ubused,	/* bytes used by me */
	int		*stat)		/* BULKSTAT_RV_... */
{
	return xfs_bulkstat_one_int(mp, ino, buffer, ubsize,
				    xfs_bulkstat_one_fmt, ubused, stat);
}

/* true while at least one full stat structure still fits in the buffer */
#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 *
 * Walks the inode allocation btree of each allocation group, collecting
 * inode chunk records into 'irbuf', then formats each allocated inode via
 * 'formatter' into the user's buffer.  *lastinop is the resume cursor:
 * updated on return so the next call continues where this one stopped.
 */
int					/* error status */
xfs_bulkstat(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* last inode returned */
	int			*ubcountp, /* size of buffer/count returned */
	bulkstat_one_pf		formatter, /* func that'd fill a single buf */
	size_t			statstruct_size, /* sizeof struct filling */
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_agblock_t		agbno=0;/* allocation group block number */
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	int			chunkidx; /* current index into inode chunk */
	int			clustidx; /* current index into inode cluster */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
	int			end_of_ag; /* set if we've seen the ag end */
	int			error;	/* error code */
	int			fmterror;/* bulkstat formatter result */
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
	xfs_ino_t		lastino; /* last inode number returned */
	int			nbcluster; /* # of blocks in a cluster */
	int			nicluster; /* # of inodes in a cluster */
	int			nimask;	/* mask for inode clusters */
	int			nirbuf;	/* size of irbuf */
	int			rval;	/* return value error code */
	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
	int			ubleft;	/* bytes left in user's buffer */
	char			__user *ubufp;	/* pointer into user's buffer */
	int			ubelem;	/* spaces used in user's buffer */
	int			ubused;	/* bytes used by formatter */
	xfs_buf_t		*bp;	/* ptr to on-disk inode cluster buf */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
	ino = (xfs_ino_t)*lastinop;
	lastino = ino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}
	if (!ubcountp || *ubcountp <= 0) {
		return EINVAL;
	}
	ubcount = *ubcountp; /* statstruct's */
	ubleft = ubcount * statstruct_size; /* bytes */
	*ubcountp = ubelem = 0;
	*done = 0;
	fmterror = 0;
	ubufp = ubuffer;
	/* geometry of an inode cluster for read-ahead decisions */
	nicluster = mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp) ?
		mp->m_sb.sb_inopblock :
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
	rval = 0;
	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
		cond_resched();
		bp = NULL;
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error) {
			/*
			 * Skip this allocation group and go to the next one.
			 */
			agno++;
			agino = 0;
			continue;
		}
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
		irbp = irbuf;
		irbufend = irbuf + nirbuf;
		end_of_ag = 0;
		/*
		 * If we're returning in the middle of an allocation group,
		 * we need to get the remainder of the chunk we're in.
		 */
		if (agino > 0) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Lookup the inode chunk that this inode lives in.
			 */
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE,
						 &tmp);
			if (!error &&	/* no I/O error */
			    tmp &&	/* lookup succeeded */
					/* got the record, should always work */
			    !(error = xfs_inobt_get_rec(cur, &r, &i)) &&
			    i == 1 &&
					/* this is the right chunk */
			    agino < r.ir_startino + XFS_INODES_PER_CHUNK &&
					/* lastino was not last in chunk */
			    (chunkidx = agino - r.ir_startino + 1) <
				    XFS_INODES_PER_CHUNK &&
					/* there are some left allocated */
			    xfs_inobt_maskn(chunkidx,
				    XFS_INODES_PER_CHUNK - chunkidx) &
				    ~r.ir_free) {
				/*
				 * Grab the chunk record.  Mark all the
				 * uninteresting inodes (because they're
				 * before our start point) free.
				 */
				for (i = 0; i < chunkidx; i++) {
					if (XFS_INOBT_MASK(i) & ~r.ir_free)
						r.ir_freecount++;
				}
				r.ir_free |= xfs_inobt_maskn(0, chunkidx);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
				icount = XFS_INODES_PER_CHUNK - r.ir_freecount;
			} else {
				/*
				 * If any of those tests failed, bump the
				 * inode number (just in case).
				 */
				agino++;
				icount = 0;
			}
			/*
			 * In any case, increment to the next record.
			 */
			if (!error)
				error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/*
			 * Start of ag.  Lookup the first inode chunk.
			 */
			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
			icount = 0;
		}
		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			xfs_inobt_rec_incore_t r;

			/*
			 * Loop as long as we're unable to read the
			 * inode btree.
			 */
			while (error) {
				agino += XFS_INODES_PER_CHUNK;
				if (XFS_AGINO_TO_AGBNO(mp, agino) >=
						be32_to_cpu(agi->agi_length))
					break;
				error = xfs_inobt_lookup(cur, agino,
							 XFS_LOOKUP_GE, &tmp);
				cond_resched();
			}
			/*
			 * If ran off the end of the ag either with an error,
			 * or the normal way, set end and stop collecting.
			 */
			if (error) {
				end_of_ag = 1;
				break;
			}

			error = xfs_inobt_get_rec(cur, &r, &i);
			if (error || i == 0) {
				end_of_ag = 1;
				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				/*
				 * Loop over all clusters in the next chunk.
				 * Do a readahead if there are any allocated
				 * inodes in that cluster.
				 */
				agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino);
				for (chunkidx = 0;
				     chunkidx < XFS_INODES_PER_CHUNK;
				     chunkidx += nicluster,
				     agbno += nbcluster) {
					if (xfs_inobt_maskn(chunkidx, nicluster)
							& ~r.ir_free)
						xfs_btree_reada_bufs(mp, agno,
							agbno, nbcluster,
							&xfs_inode_buf_ops);
				}
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
			/*
			 * Set agino to after this chunk and bump the cursor.
			 */
			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
		/*
		 * Drop the btree buffers and the agi buffer.
		 * We can't hold any of the locks these represent
		 * when calling iget.
		 */
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
		/*
		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
			/*
			 * Now process this chunk of inodes.
			 */
			for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
			     XFS_BULKSTAT_UBLEFT(ubleft) &&
				irbp->ir_freecount < XFS_INODES_PER_CHUNK;
			     chunkidx++, clustidx++, agino++) {
				ASSERT(chunkidx < XFS_INODES_PER_CHUNK);

				/*
				 * Recompute agbno if this is the
				 * first inode of the cluster.
				 *
				 * Careful with clustidx.   There can be
				 * multiple clusters per chunk, a single
				 * cluster per chunk or a cluster that has
				 * inodes represented from several different
				 * chunks (if blocksize is large).
				 *
				 * Because of this, the starting clustidx is
				 * initialized to zero in this loop but must
				 * later be reset after reading in the cluster
				 * buffer.
				 */
				if ((chunkidx & (nicluster - 1)) == 0) {
					agbno = XFS_AGINO_TO_AGBNO(mp,
							irbp->ir_startino) +
						((chunkidx & nimask) >>
						 mp->m_sb.sb_inopblog);
				}
				ino = XFS_AGINO_TO_INO(mp, agno, agino);
				/*
				 * Skip if this inode is free.
				 */
				if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
					lastino = ino;
					continue;
				}
				/*
				 * Count used inodes as free so we can tell
				 * when the chunk is used up.
				 */
				irbp->ir_freecount++;

				/*
				 * Get the inode and fill in a single buffer.
				 */
				ubused = statstruct_size;
				error = formatter(mp, ino, ubufp, ubleft,
						  &ubused, &fmterror);
				if (fmterror == BULKSTAT_RV_NOTHING) {
					if (error && error != ENOENT &&
						error != EINVAL) {
						ubleft = 0;
						rval = error;
						break;
					}
					lastino = ino;
					continue;
				}
				if (fmterror == BULKSTAT_RV_GIVEUP) {
					ubleft = 0;
					ASSERT(error);
					rval = error;
					break;
				}
				if (ubufp)
					ubufp += ubused;
				ubleft -= ubused;
				ubelem++;
				lastino = ino;
			}

			cond_resched();
		}

		if (bp)
			xfs_buf_relse(bp);

		/*
		 * Set up for the next loop iteration.
		 */
		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
			if (end_of_ag) {
				agno++;
				agino = 0;
			} else
				agino = XFS_INO_TO_AGINO(mp, lastino);
		} else
			break;
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free_large(irbuf);
	*ubcountp = ubelem;
	/*
	 * Found some inodes, return them now and return the error next time.
	 */
	if (ubelem)
		rval = 0;
	if (agno >= mp->m_sb.sb_agcount) {
		/*
		 * If we ran out of filesystem, mark lastino as off
		 * the end of the filesystem, so the next call
		 * will return immediately.
		 */
		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
	} else
		*lastinop = (xfs_ino_t)lastino;

	return rval;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 * Special case for non-sequential one inode bulkstat.
 *
 * Tries the fast direct xfs_bulkstat_one() path first; if that fails (e.g.
 * the inode number is not allocated), falls back to a regular one-entry
 * bulkstat scan starting just before the requested inode.
 */
int					/* error status */
xfs_bulkstat_single(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_ino_t		*lastinop, /* inode to return */
	char			__user *buffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	int			count;	/* count value for bulkstat call */
	int			error;	/* return value */
	xfs_ino_t		ino;	/* filesystem inode number */
	int			res;	/* result from bs1 */

	/*
	 * note that requesting valid inode numbers which are not allocated
	 * to inodes will most likely cause xfs_imap_to_bp to generate warning
	 * messages about bad magic numbers. This is ok. The fact that
	 * the inode isn't actually an inode is handled by the
	 * error check below. Done this way to make the usual case faster
	 * at the expense of the error case.
	 */

	ino = (xfs_ino_t)*lastinop;
	error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 0, &res);
	if (error) {
		/*
		 * Special case way failed, do it the "long" way
		 * to see if that works.
		 */
		(*lastinop)--;
		count = 1;
		if (xfs_bulkstat(mp, lastinop, &count,
				xfs_bulkstat_one, sizeof(xfs_bstat_t),
				buffer, done))
			return error;
		if (count == 0 || (xfs_ino_t)*lastinop != ino)
			return error == EFSCORRUPTED ?
				XFS_ERROR(EINVAL) : error;
		else
			return 0;
	}
	*done = 0;
	return 0;
}

/*
 * Copy 'count' xfs_inogrp records out to userspace; reports the number of
 * bytes written via *written.  Returns -EFAULT on copy failure.
 */
int
xfs_inumbers_fmt(
	void			__user *ubuffer, /* buffer to write to */
	const xfs_inogrp_t	*buffer,	/* buffer to read from */
	long			count,		/* # of elements to read */
	long			*written)	/* # of bytes written */
{
	if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer)))
		return -EFAULT;
	*written = count * sizeof(*buffer);
	return 0;
}

/*
 * Return inode number table for the filesystem.
 *
 * Walks the inode allocation btrees of all allocation groups starting at
 * *lastino, batching up to a page of xfs_inogrp records (one per inode
 * chunk: start inode, allocated count, allocation mask) and flushing them
 * through 'formatter'.  *lastino and *count are updated for resumption.
 */
int					/* error status */
xfs_inumbers(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	*lastino,	/* last inode returned */
	int		*count,		/* size of buffer/count returned */
	void		__user *ubuffer,/* buffer with inode descriptions */
	inumbers_fmt_pf	formatter)
{
	xfs_buf_t	*agbp;
	xfs_agino_t	agino;
	xfs_agnumber_t	agno;
	int		bcount;
	xfs_inogrp_t	*buffer;
	int		bufidx;
	xfs_btree_cur_t	*cur;
	int		error;
	xfs_inobt_rec_incore_t r;
	int		i;
	xfs_ino_t	ino;
	int		left;
	int		tmp;

	ino = (xfs_ino_t)*lastino;
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	left = *count;
	*count = 0;
	/* batch at most a page worth of records before flushing to user */
	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
	error = bufidx = 0;
	cur = NULL;
	agbp = NULL;
	while (left > 0 && agno < mp->m_sb.sb_agcount) {
		if (agbp == NULL) {
			error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
			if (error) {
				/*
				 * If we can't read the AGI of this ag,
				 * then just skip to the next one.
				 */
				ASSERT(cur == NULL);
				agbp = NULL;
				agno++;
				agino = 0;
				continue;
			}
			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
			error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE,
						 &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * Move up the last inode in the current
				 * chunk.  The lookup_ge will always get
				 * us the first inode in the next chunk.
				 */
				agino += XFS_INODES_PER_CHUNK - 1;
				continue;
			}
		}
		error = xfs_inobt_get_rec(cur, &r, &i);
		if (error || i == 0) {
			xfs_buf_relse(agbp);
			agbp = NULL;
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			cur = NULL;
			agno++;
			agino = 0;
			continue;
		}
		agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1;
		buffer[bufidx].xi_startino =
			XFS_AGINO_TO_INO(mp, agno, r.ir_startino);
		buffer[bufidx].xi_alloccount =
			XFS_INODES_PER_CHUNK - r.ir_freecount;
		buffer[bufidx].xi_allocmask = ~r.ir_free;
		bufidx++;
		left--;
		if (bufidx == bcount) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written)) {
				error = XFS_ERROR(EFAULT);
				break;
			}
			ubuffer += written;
			*count += bufidx;
			bufidx = 0;
		}
		if (left) {
			error = xfs_btree_increment(cur, 0, &tmp);
			if (error) {
				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
				cur = NULL;
				xfs_buf_relse(agbp);
				agbp = NULL;
				/*
				 * The agino value has already been bumped.
				 * Just try to skip up to it.
				 */
				agino += XFS_INODES_PER_CHUNK;
				continue;
			}
		}
	}
	if (!error) {
		if (bufidx) {
			long written;
			if (formatter(ubuffer, buffer, bufidx, &written))
				error = XFS_ERROR(EFAULT);
			else
				*count += bufidx;
		}
		*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
	}
	kmem_free(buffer);
	if (cur)
		xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
					   XFS_BTREE_NOERROR));
	if (agbp)
		xfs_buf_relse(agbp);

	return error;
}
gpl-2.0
RadiumBot/Radium_tomato
drivers/staging/iio/adc/ad7816.c
2247
11406
/*
 * AD7816 digital temperature sensor driver supporting AD7816/7/8
 *
 * Copyright 2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/*
 * AD7816 config masks
 */
#define AD7816_FULL			0x1	/* full (normal) operation */
#define AD7816_PD			0x2	/* power-down / power-save */
#define AD7816_CS_MASK			0x7
#define AD7816_CS_MAX			0x4	/* highest valid channel id */

/*
 * AD7816 temperature masks
 */
#define AD7816_VALUE_OFFSET		6	/* 10-bit sample left-justified in 16 bits */
#define AD7816_BOUND_VALUE_BASE		0x8
#define AD7816_BOUND_VALUE_MIN		-95
#define AD7816_BOUND_VALUE_MAX		152
#define AD7816_TEMP_FLOAT_OFFSET	2	/* 2 fractional temperature bits */
#define AD7816_TEMP_FLOAT_MASK		0x3

/*
 * struct ad7816_chip_info - chip specific information
 */
struct ad7816_chip_info {
	struct spi_device *spi_dev;	/* SPI device handle */
	u16 rdwr_pin;			/* RD/WR control GPIO */
	u16 convert_pin;		/* conversion-start GPIO */
	u16 busy_pin;			/* conversion-busy status GPIO */
	u8  oti_data[AD7816_CS_MAX+1];	/* cached over-temp bound per channel */
	u8  channel_id;	/* 0 always be temperature */
	u8  mode;			/* AD7816_FULL or AD7816_PD */
};

/*
 * ad7816 data access by SPI
 */
/*
 * ad7816_spi_read - select the channel, trigger one conversion and read
 * the 16-bit result.  The rdwr/convert GPIO toggling sequences follow the
 * two operating modes; the busy pin is polled until conversion completes.
 * Returns 0 on success or a negative SPI error code.
 */
static int ad7816_spi_read(struct ad7816_chip_info *chip, u16 *data)
{
	struct spi_device *spi_dev = chip->spi_dev;
	int ret = 0;

	gpio_set_value(chip->rdwr_pin, 1);
	gpio_set_value(chip->rdwr_pin, 0);
	ret = spi_write(spi_dev, &chip->channel_id, sizeof(chip->channel_id));
	if (ret < 0) {
		dev_err(&spi_dev->dev, "SPI channel setting error\n");
		return ret;
	}
	gpio_set_value(chip->rdwr_pin, 1);

	if (chip->mode == AD7816_PD) { /* operating mode 2 */
		gpio_set_value(chip->convert_pin, 1);
		gpio_set_value(chip->convert_pin, 0);
	} else { /* operating mode 1 */
		gpio_set_value(chip->convert_pin, 0);
		gpio_set_value(chip->convert_pin, 1);
	}

	/* Busy-wait for the conversion to finish. */
	while (gpio_get_value(chip->busy_pin))
		cpu_relax();

	gpio_set_value(chip->rdwr_pin, 0);
	gpio_set_value(chip->rdwr_pin, 1);
	ret = spi_read(spi_dev, (u8 *)data, sizeof(*data));
	if (ret < 0) {
		dev_err(&spi_dev->dev, "SPI data read error\n");
		return ret;
	}

	/* Device sends the sample big-endian. */
	*data = be16_to_cpu(*data);

	return ret;
}

/*
 * ad7816_spi_write - write one byte (an over-temperature bound) to the
 * currently selected channel's OTI register.
 */
static int ad7816_spi_write(struct ad7816_chip_info *chip, u8 data)
{
	struct spi_device *spi_dev = chip->spi_dev;
	int ret = 0;

	gpio_set_value(chip->rdwr_pin, 1);
	gpio_set_value(chip->rdwr_pin, 0);
	ret = spi_write(spi_dev, &data, sizeof(data));
	if (ret < 0)
		dev_err(&spi_dev->dev, "SPI oti data write error\n");

	return ret;
}

/* sysfs: show the cached operating mode (any nonzero mode prints power-save). */
static ssize_t ad7816_show_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);

	if (chip->mode)
		return sprintf(buf, "power-save\n");
	else
		return sprintf(buf, "full\n");
}

/*
 * sysfs: select the operating mode.
 *
 * NOTE(review): writing any string other than "full" selects AD7816_FULL
 * here while writing "full" selects AD7816_PD — this looks inverted, and
 * is also inconsistent with ad7816_show_mode() above (which prints
 * "power-save" for every nonzero mode, including AD7816_FULL).  Confirm
 * against the datasheet / upstream before relying on this attribute.
 */
static ssize_t ad7816_store_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);

	if (strcmp(buf, "full")) {
		gpio_set_value(chip->rdwr_pin, 1);
		chip->mode = AD7816_FULL;
	} else {
		gpio_set_value(chip->rdwr_pin, 0);
		chip->mode = AD7816_PD;
	}

	return len;
}

static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
		ad7816_show_mode,
		ad7816_store_mode,
		0);

/* sysfs: list the mode strings accepted by the "mode" attribute. */
static ssize_t ad7816_show_available_modes(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "full\npower-save\n");
}

static IIO_DEVICE_ATTR(available_modes, S_IRUGO,
		ad7816_show_available_modes, NULL, 0);

/* sysfs: show the currently selected channel (0 = temperature). */
static ssize_t ad7816_show_channel(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);

	return sprintf(buf, "%d\n", chip->channel_id);
}

/*
 * sysfs: select the channel, validating the id against the variant
 * (ad7818 has channels 0-1, ad7816 only channel 0).
 */
static ssize_t ad7816_store_channel(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);
	unsigned long data;
	int ret;

	ret = strict_strtoul(buf, 10, &data);
	if (ret)
		return -EINVAL;

	if (data > AD7816_CS_MAX && data != AD7816_CS_MASK) {
		dev_err(&chip->spi_dev->dev, "Invalid channel id %lu for %s.\n",
			data, indio_dev->name);
		return -EINVAL;
	} else if (strcmp(indio_dev->name, "ad7818") == 0 && data > 1) {
		dev_err(&chip->spi_dev->dev,
			"Invalid channel id %lu for ad7818.\n", data);
		return -EINVAL;
	} else if (strcmp(indio_dev->name, "ad7816") == 0 && data > 0) {
		dev_err(&chip->spi_dev->dev,
			"Invalid channel id %lu for ad7816.\n", data);
		return -EINVAL;
	}

	chip->channel_id = data;

	return len;
}

static IIO_DEVICE_ATTR(channel, S_IRUGO | S_IWUSR,
		ad7816_show_channel,
		ad7816_store_channel,
		0);

/*
 * sysfs: run one conversion and print the result.  Channel 0 is the
 * internal temperature sensor and is decoded to a signed value with two
 * fractional bits (0.25 degC steps); other channels print the raw sample.
 */
static ssize_t ad7816_show_value(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);
	u16 data;
	s8 value;
	int ret;

	ret = ad7816_spi_read(chip, &data);
	if (ret)
		return -EIO;

	/* Drop the unused low bits: the 10-bit sample is left-justified. */
	data >>= AD7816_VALUE_OFFSET;

	if (chip->channel_id == 0) {
		value = (s8)((data >> AD7816_TEMP_FLOAT_OFFSET) - 103);
		data &= AD7816_TEMP_FLOAT_MASK;
		if (value < 0)
			/* two's-complement fractional part for negatives */
			data = (1 << AD7816_TEMP_FLOAT_OFFSET) - data;
		return sprintf(buf, "%d.%.2d\n", value, data * 25);
	} else
		return sprintf(buf, "%u\n", data);
}

static IIO_DEVICE_ATTR(value, S_IRUGO, ad7816_show_value, NULL, 0);

static struct attribute *ad7816_attributes[] = {
	&iio_dev_attr_available_modes.dev_attr.attr,
	&iio_dev_attr_mode.dev_attr.attr,
	&iio_dev_attr_channel.dev_attr.attr,
	&iio_dev_attr_value.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad7816_attribute_group = {
	.attrs = ad7816_attributes,
};

/*
 * temperature bound events
 */
#define IIO_EVENT_CODE_AD7816_OTI IIO_UNMOD_EVENT_CODE(IIO_TEMP,	\
						       0,		\
						       IIO_EV_TYPE_THRESH, \
						       IIO_EV_DIR_FALLING)

/* IRQ handler: forward the over-temperature interrupt as an IIO event. */
static irqreturn_t ad7816_event_handler(int irq, void *private)
{
	iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI, iio_get_time_ns());
	return IRQ_HANDLED;
}

/*
 * sysfs: show the cached over-temperature bound for the current channel.
 * Channel 0 values are decoded back to degrees; others print raw.
 */
static ssize_t ad7816_show_oti(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);
	int value;

	if (chip->channel_id > AD7816_CS_MAX) {
		dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
		return -EINVAL;
	} else if (chip->channel_id == 0) {
		value = AD7816_BOUND_VALUE_MIN +
			(chip->oti_data[chip->channel_id] -
			 AD7816_BOUND_VALUE_BASE);
		return sprintf(buf, "%d\n", value);
	} else
		return sprintf(buf, "%u\n", chip->oti_data[chip->channel_id]);
}

/*
 * sysfs: set the over-temperature bound for the current channel.  The
 * parsed value is range-checked per channel, written to the device and
 * cached in oti_data[] on success.
 */
static inline ssize_t ad7816_set_oti(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);
	long value;
	u8 data;
	int ret;

	ret = strict_strtol(buf, 10, &value);

	if (chip->channel_id > AD7816_CS_MAX) {
		dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
		return -EINVAL;
	} else if (chip->channel_id == 0) {
		if (ret || value < AD7816_BOUND_VALUE_MIN ||
			value > AD7816_BOUND_VALUE_MAX)
			return -EINVAL;

		data = (u8)(value - AD7816_BOUND_VALUE_MIN +
			AD7816_BOUND_VALUE_BASE);
	} else {
		if (ret || value < AD7816_BOUND_VALUE_BASE || value > 255)
			return -EINVAL;

		data = (u8)value;
	}

	ret = ad7816_spi_write(chip, data);
	if (ret)
		return -EIO;

	chip->oti_data[chip->channel_id] = data;

	return len;
}

static IIO_DEVICE_ATTR(oti, S_IRUGO | S_IWUSR,
		       ad7816_show_oti, ad7816_set_oti, 0);

static struct attribute *ad7816_event_attributes[] = {
	&iio_dev_attr_oti.dev_attr.attr,
	NULL,
};

static struct attribute_group ad7816_event_attribute_group = {
	.attrs = ad7816_event_attributes,
	.name = "events",
};

static const struct iio_info ad7816_info = {
	.attrs = &ad7816_attribute_group,
	.event_attrs = &ad7816_event_attribute_group,
	.driver_module = THIS_MODULE,
};

/*
 * device probe and remove
 */
/*
 * ad7816_probe - allocate the IIO device, claim the three control GPIOs
 * from platform data, optionally hook the over-temperature IRQ, and
 * register with the IIO core.  Resources are released in reverse order
 * on failure via the goto chain.
 *
 * NOTE(review): the error_free_irq label calls free_irq() even when
 * spi_dev->irq is 0 and no IRQ was requested — confirm whether
 * iio_device_register() failure with irq == 0 is reachable here.
 */
static int ad7816_probe(struct spi_device *spi_dev)
{
	struct ad7816_chip_info *chip;
	struct iio_dev *indio_dev;
	unsigned short *pins = spi_dev->dev.platform_data;
	int ret = 0;
	int i;

	if (!pins) {
		dev_err(&spi_dev->dev, "No necessary GPIO platform data.\n");
		return -EINVAL;
	}

	indio_dev = iio_device_alloc(sizeof(*chip));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	chip = iio_priv(indio_dev);
	/* this is only used for device removal purposes */
	dev_set_drvdata(&spi_dev->dev, indio_dev);

	chip->spi_dev = spi_dev;
	for (i = 0; i <= AD7816_CS_MAX; i++)
		chip->oti_data[i] = 203;	/* power-on OTI default */
	chip->rdwr_pin = pins[0];
	chip->convert_pin = pins[1];
	chip->busy_pin = pins[2];

	ret = gpio_request(chip->rdwr_pin, spi_get_device_id(spi_dev)->name);
	if (ret) {
		dev_err(&spi_dev->dev, "Fail to request rdwr gpio PIN %d.\n",
			chip->rdwr_pin);
		goto error_free_device;
	}
	gpio_direction_input(chip->rdwr_pin);
	ret = gpio_request(chip->convert_pin, spi_get_device_id(spi_dev)->name);
	if (ret) {
		dev_err(&spi_dev->dev, "Fail to request convert gpio PIN %d.\n",
			chip->convert_pin);
		goto error_free_gpio_rdwr;
	}
	gpio_direction_input(chip->convert_pin);
	ret = gpio_request(chip->busy_pin, spi_get_device_id(spi_dev)->name);
	if (ret) {
		dev_err(&spi_dev->dev, "Fail to request busy gpio PIN %d.\n",
			chip->busy_pin);
		goto error_free_gpio_convert;
	}
	gpio_direction_input(chip->busy_pin);

	indio_dev->name = spi_get_device_id(spi_dev)->name;
	indio_dev->dev.parent = &spi_dev->dev;
	indio_dev->info = &ad7816_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	if (spi_dev->irq) {
		/* Only low trigger is supported in ad7816/7/8 */
		ret = request_threaded_irq(spi_dev->irq,
					   NULL,
					   &ad7816_event_handler,
					   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					   indio_dev->name,
					   indio_dev);
		if (ret)
			goto error_free_gpio;
	}

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_irq;

	dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
		 indio_dev->name);

	return 0;

error_free_irq:
	free_irq(spi_dev->irq, indio_dev);
error_free_gpio:
	gpio_free(chip->busy_pin);
error_free_gpio_convert:
	gpio_free(chip->convert_pin);
error_free_gpio_rdwr:
	gpio_free(chip->rdwr_pin);
error_free_device:
	iio_device_free(indio_dev);
error_ret:
	return ret;
}

/* ad7816_remove - unwind everything ad7816_probe() set up. */
static int ad7816_remove(struct spi_device *spi_dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
	struct ad7816_chip_info *chip = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	dev_set_drvdata(&spi_dev->dev, NULL);
	if (spi_dev->irq)
		free_irq(spi_dev->irq, indio_dev);
	gpio_free(chip->busy_pin);
	gpio_free(chip->convert_pin);
	gpio_free(chip->rdwr_pin);
	iio_device_free(indio_dev);

	return 0;
}

static const struct spi_device_id ad7816_id[] = {
	{ "ad7816", 0 },
	{ "ad7817", 0 },
	{ "ad7818", 0 },
	{}
};

MODULE_DEVICE_TABLE(spi, ad7816_id);

static struct spi_driver ad7816_driver = {
	.driver = {
		.name = "ad7816",
		.owner = THIS_MODULE,
	},
	.probe = ad7816_probe,
	.remove = ad7816_remove,
	.id_table = ad7816_id,
};
module_spi_driver(ad7816_driver);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7816/7/8 digital"
			" temperature sensor driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
zhaochengw/android_kernel_nx403
drivers/switch/switch_class.c
4039
4393
/* * drivers/switch/switch_class.c * * Copyright (C) 2008 Google, Inc. * Author: Mike Lockwood <lockwood@android.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/err.h> #include <linux/switch.h> struct class *switch_class; static atomic_t device_count; static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct switch_dev *sdev = (struct switch_dev *) dev_get_drvdata(dev); if (sdev->print_state) { int ret = sdev->print_state(sdev, buf); if (ret >= 0) return ret; } return sprintf(buf, "%d\n", sdev->state); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct switch_dev *sdev = (struct switch_dev *) dev_get_drvdata(dev); if (sdev->print_name) { int ret = sdev->print_name(sdev, buf); if (ret >= 0) return ret; } return sprintf(buf, "%s\n", sdev->name); } static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL); static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL); void switch_set_state(struct switch_dev *sdev, int state) { char name_buf[120]; char state_buf[120]; char *prop_buf; char *envp[3]; int env_offset = 0; int length; if (sdev->state != state) { sdev->state = state; prop_buf = (char *)get_zeroed_page(GFP_KERNEL); if (prop_buf) { length = name_show(sdev->dev, NULL, prop_buf); if (length > 0) { if (prop_buf[length - 1] == '\n') prop_buf[length - 1] = 0; snprintf(name_buf, sizeof(name_buf), "SWITCH_NAME=%s", prop_buf); 
envp[env_offset++] = name_buf; } length = state_show(sdev->dev, NULL, prop_buf); if (length > 0) { if (prop_buf[length - 1] == '\n') prop_buf[length - 1] = 0; snprintf(state_buf, sizeof(state_buf), "SWITCH_STATE=%s", prop_buf); envp[env_offset++] = state_buf; } envp[env_offset] = NULL; kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp); free_page((unsigned long)prop_buf); } else { printk(KERN_ERR "out of memory in switch_set_state\n"); kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE); } } } EXPORT_SYMBOL_GPL(switch_set_state); static int create_switch_class(void) { if (!switch_class) { switch_class = class_create(THIS_MODULE, "switch"); if (IS_ERR(switch_class)) return PTR_ERR(switch_class); atomic_set(&device_count, 0); } return 0; } int switch_dev_register(struct switch_dev *sdev) { int ret; if (!switch_class) { ret = create_switch_class(); if (ret < 0) return ret; } sdev->index = atomic_inc_return(&device_count); sdev->dev = device_create(switch_class, NULL, MKDEV(0, sdev->index), NULL, sdev->name); if (IS_ERR(sdev->dev)) return PTR_ERR(sdev->dev); ret = device_create_file(sdev->dev, &dev_attr_state); if (ret < 0) goto err_create_file_1; ret = device_create_file(sdev->dev, &dev_attr_name); if (ret < 0) goto err_create_file_2; dev_set_drvdata(sdev->dev, sdev); sdev->state = 0; return 0; err_create_file_2: device_remove_file(sdev->dev, &dev_attr_state); err_create_file_1: device_destroy(switch_class, MKDEV(0, sdev->index)); printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name); return ret; } EXPORT_SYMBOL_GPL(switch_dev_register); void switch_dev_unregister(struct switch_dev *sdev) { device_remove_file(sdev->dev, &dev_attr_name); device_remove_file(sdev->dev, &dev_attr_state); device_destroy(switch_class, MKDEV(0, sdev->index)); dev_set_drvdata(sdev->dev, NULL); } EXPORT_SYMBOL_GPL(switch_dev_unregister); static int __init switch_class_init(void) { return create_switch_class(); } static void __exit switch_class_exit(void) { 
class_destroy(switch_class); } module_init(switch_class_init); module_exit(switch_class_exit); MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); MODULE_DESCRIPTION("Switch class driver"); MODULE_LICENSE("GPL");
gpl-2.0
ignacio28/android_kernel_lge_msm8610
drivers/char/hw_random/omap-rng.c
4807
5039
/*
 * omap-rng.c - RNG driver for TI OMAP CPU family
 *
 * Author: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Mostly based on original driver:
 *
 * Copyright (C) 2005 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>

#include <asm/io.h>

#include <plat/cpu.h>

#define RNG_OUT_REG		0x00	/* Output register */
#define RNG_STAT_REG		0x04	/* Status register
					[0] = STAT_BUSY */
#define RNG_ALARM_REG		0x24	/* Alarm register
					[7:0] = ALARM_COUNTER */
#define RNG_CONFIG_REG		0x28	/* Configuration register
					[11:6] = RESET_COUNT
					[5:3]  = RING2_DELAY
					[2:0]  = RING1_DELAY */
#define RNG_REV_REG		0x3c	/* Revision register
					[7:0] = REV_NB */
#define RNG_MASK_REG		0x40	/* Mask and reset register
					[2] = IT_EN
					[1] = SOFTRESET
					[0] = AUTOIDLE */
#define RNG_SYSSTATUS		0x44	/* System status
					[0] = RESETDONE */

/* Single-instance driver state (only one RNG block exists per SoC). */
static void __iomem *rng_base;			/* mapped register window */
static struct clk *rng_ick;			/* interface clock (OMAP2 only) */
static struct platform_device *rng_dev;		/* guards against double probe */

static inline u32 omap_rng_read_reg(int reg)
{
	return __raw_readl(rng_base + reg);
}

static inline void omap_rng_write_reg(int reg, u32 val)
{
	__raw_writel(val, rng_base + reg);
}

/*
 * omap_rng_data_present - hwrng callback: poll the status register for a
 * fresh random word.  Returns 1 when data is ready, 0 otherwise; with
 * @wait set, retries up to 20 times with short delays.
 */
static int omap_rng_data_present(struct hwrng *rng, int wait)
{
	int data, i;

	for (i = 0; i < 20; i++) {
		/* STAT_BUSY clear means a word is available. */
		data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1;
		if (data || !wait)
			break;
		/* RNG produces data fast enough (2+ MBit/sec, even
		 * during "rngtest" loads, that these delays don't
		 * seem to trigger.  We *could* use the RNG IRQ, but
		 * that'd be higher overhead ... so why bother?
		 */
		udelay(10);
	}
	return data;
}

/* hwrng callback: fetch one 32-bit random word. */
static int omap_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = omap_rng_read_reg(RNG_OUT_REG);

	return 4;	/* bytes produced */
}

static struct hwrng omap_rng_ops = {
	.name		= "omap",
	.data_present	= omap_rng_data_present,
	.data_read	= omap_rng_data_read,
};

/*
 * omap_rng_probe - claim the MMIO region, map it, register with the
 * hwrng core, and enable the block (AUTOIDLE bit in RNG_MASK_REG).
 * On OMAP2 the interface clock must be enabled first.
 */
static int __devinit omap_rng_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret;

	/*
	 * A bit ugly, and it will never actually happen but there can
	 * be only one RNG and this catches any bork
	 */
	if (rng_dev)
		return -EBUSY;

	if (cpu_is_omap24xx()) {
		rng_ick = clk_get(&pdev->dev, "ick");
		if (IS_ERR(rng_ick)) {
			dev_err(&pdev->dev, "Could not get rng_ick\n");
			ret = PTR_ERR(rng_ick);
			return ret;
		} else
			clk_enable(rng_ick);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		goto err_region;
	}

	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
		ret = -EBUSY;
		goto err_region;
	}
	/* Stash the resource so omap_rng_remove() can release it. */
	dev_set_drvdata(&pdev->dev, res);

	rng_base = ioremap(res->start, resource_size(res));
	if (!rng_base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	ret = hwrng_register(&omap_rng_ops);
	if (ret)
		goto err_register;

	dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
		omap_rng_read_reg(RNG_REV_REG));
	omap_rng_write_reg(RNG_MASK_REG, 0x1);

	rng_dev = pdev;

	return 0;

err_register:
	iounmap(rng_base);
	rng_base = NULL;
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_region:
	if (cpu_is_omap24xx()) {
		clk_disable(rng_ick);
		clk_put(rng_ick);
	}
	return ret;
}

/* omap_rng_remove - unwind omap_rng_probe() in reverse order. */
static int __exit omap_rng_remove(struct platform_device *pdev)
{
	struct resource *res = dev_get_drvdata(&pdev->dev);

	hwrng_unregister(&omap_rng_ops);

	omap_rng_write_reg(RNG_MASK_REG, 0x0);

	iounmap(rng_base);

	if (cpu_is_omap24xx()) {
		clk_disable(rng_ick);
		clk_put(rng_ick);
	}

	release_mem_region(res->start, resource_size(res));
	rng_base = NULL;

	return 0;
}

#ifdef CONFIG_PM

/* Disable the block across suspend, re-enable on resume. */
static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message)
{
	omap_rng_write_reg(RNG_MASK_REG, 0x0);
	return 0;
}

static int omap_rng_resume(struct platform_device *pdev)
{
	omap_rng_write_reg(RNG_MASK_REG, 0x1);
	return 0;
}

#else

#define	omap_rng_suspend	NULL
#define	omap_rng_resume		NULL

#endif

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap_rng");

static struct platform_driver omap_rng_driver = {
	.driver = {
		.name		= "omap_rng",
		.owner		= THIS_MODULE,
	},
	.probe		= omap_rng_probe,
	.remove		= __exit_p(omap_rng_remove),
	.suspend	= omap_rng_suspend,
	.resume		= omap_rng_resume
};

static int __init omap_rng_init(void)
{
	/* The hardware block only exists on OMAP16xx and OMAP24xx. */
	if (!cpu_is_omap16xx() && !cpu_is_omap24xx())
		return -ENODEV;

	return platform_driver_register(&omap_rng_driver);
}

static void __exit omap_rng_exit(void)
{
	platform_driver_unregister(&omap_rng_driver);
}

module_init(omap_rng_init);
module_exit(omap_rng_exit);

MODULE_AUTHOR("Deepak Saxena (and others)");
MODULE_LICENSE("GPL");
gpl-2.0
CallMeAldy/AK-Mako
drivers/s390/crypto/zcrypt_cex2a.c
4807
15459
/*
 *  linux/drivers/s390/crypto/zcrypt_cex2a.c
 *
 *  zcrypt 2.1.0
 *
 *  Copyright (C)  2001, 2006 IBM Corporation
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>

#include "ap_bus.h"
#include "zcrypt_api.h"
#include "zcrypt_error.h"
#include "zcrypt_cex2a.h"

#define CEX2A_MIN_MOD_SIZE	  1	/*    8 bits	*/
#define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits	*/
#define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
#define CEX3A_MAX_MOD_SIZE	512	/* 4096 bits	*/

#define CEX2A_SPEED_RATING	970
#define CEX3A_SPEED_RATING	900 /* Fixme: Needs finetuning */

#define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg)    */
#define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */

#define CEX3A_MAX_RESPONSE_SIZE	0x210	/* 512 bit modulus
					 * (max outputdatalength) +
					 * type80_hdr*/
#define CEX3A_MAX_MESSAGE_SIZE	sizeof(struct type50_crb3_msg)

#define CEX2A_CLEANUP_TIME	(15*HZ)
#define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME

/* AP device types this driver binds to. */
static struct ap_device_id zcrypt_cex2a_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
		   "Copyright 2001, 2006 IBM Corporation");
MODULE_LICENSE("GPL");

static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
				 struct ap_message *);

static struct ap_driver zcrypt_cex2a_driver = {
	.probe = zcrypt_cex2a_probe,
	.remove = zcrypt_cex2a_remove,
	.receive = zcrypt_cex2a_receive,
	.ids = zcrypt_cex2a_ids,
	.request_timeout = CEX2A_CLEANUP_TIME,
};

/**
 * Convert a ICAMEX message to a type50 MEX message.
 *
 * @zdev: crypto device pointer
 * @zreq: crypto request pointer
 * @mex: pointer to user input data
 *
 * The message variant (meb1/meb2/meb3) is chosen by modulus length so the
 * smallest fixed-size layout that fits is used; operands are copied
 * right-justified into their fields (hence the "+ sizeof(...) - mod_len"
 * offsets).
 *
 * Returns 0 on success or -EFAULT.
 */
static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
				       struct ap_message *ap_msg,
				       struct ica_rsa_modexpo *mex)
{
	unsigned char *mod, *exp, *inp;
	int mod_len;

	mod_len = mex->inputdatalength;

	if (mod_len <= 128) {
		struct type50_meb1_msg *meb1 = ap_msg->message;
		memset(meb1, 0, sizeof(*meb1));
		ap_msg->length = sizeof(*meb1);
		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
		meb1->header.msg_len = sizeof(*meb1);
		meb1->keyblock_type = TYPE50_MEB1_FMT;
		mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
		inp = meb1->message + sizeof(meb1->message) - mod_len;
	} else if (mod_len <= 256) {
		struct type50_meb2_msg *meb2 = ap_msg->message;
		memset(meb2, 0, sizeof(*meb2));
		ap_msg->length = sizeof(*meb2);
		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
		meb2->header.msg_len = sizeof(*meb2);
		meb2->keyblock_type = TYPE50_MEB2_FMT;
		mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
		inp = meb2->message + sizeof(meb2->message) - mod_len;
	} else {
		/* mod_len > 256 = 4096 bit RSA Key */
		struct type50_meb3_msg *meb3 = ap_msg->message;
		memset(meb3, 0, sizeof(*meb3));
		ap_msg->length = sizeof(*meb3);
		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
		meb3->header.msg_len = sizeof(*meb3);
		meb3->keyblock_type = TYPE50_MEB3_FMT;
		mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
		exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
		inp = meb3->message + sizeof(meb3->message) - mod_len;
	}

	if (copy_from_user(mod, mex->n_modulus, mod_len) ||
	    copy_from_user(exp, mex->b_key, mod_len) ||
	    copy_from_user(inp, mex->inputdata, mod_len))
		return -EFAULT;
	return 0;
}

/**
 * Convert a ICACRT message to a type50 CRT message.
 *
 * @zdev: crypto device pointer
 * @zreq: crypto request pointer
 * @crt: pointer to user input data
 *
 * As above, but for CRT-form keys: p/dp/u are the "long" operands
 * (mod_len/2 + 8 bytes), q/dq the "short" ones; oversized long operands
 * are truncated by long_offset (callers have already verified the leading
 * bytes are zero).
 *
 * Returns 0 on success or -EFAULT.
 */
static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
				       struct ap_message *ap_msg,
				       struct ica_rsa_modexpo_crt *crt)
{
	int mod_len, short_len, long_len, long_offset, limit;
	unsigned char *p, *q, *dp, *dq, *u, *inp;

	mod_len = crt->inputdatalength;
	short_len = mod_len / 2;
	long_len = mod_len / 2 + 8;

	/*
	 * CEX2A cannot handle p, dp, or U > 128 bytes.
	 * If we have one of these, we need to do extra checking.
	 * For CEX3A the limit is 256 bytes.
	 */
	if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
		limit = 256;
	else
		limit = 128;

	if (long_len > limit) {
		/*
		 * zcrypt_rsa_crt already checked for the leading
		 * zeroes of np_prime, bp_key and u_mult_inc.
		 */
		long_offset = long_len - limit;
		long_len = limit;
	} else
		long_offset = 0;

	/*
	 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
	 * the larger message structure.
	 */
	if (long_len <= 64) {
		struct type50_crb1_msg *crb1 = ap_msg->message;
		memset(crb1, 0, sizeof(*crb1));
		ap_msg->length = sizeof(*crb1);
		crb1->header.msg_type_code = TYPE50_TYPE_CODE;
		crb1->header.msg_len = sizeof(*crb1);
		crb1->keyblock_type = TYPE50_CRB1_FMT;
		p = crb1->p + sizeof(crb1->p) - long_len;
		q = crb1->q + sizeof(crb1->q) - short_len;
		dp = crb1->dp + sizeof(crb1->dp) - long_len;
		dq = crb1->dq + sizeof(crb1->dq) - short_len;
		u = crb1->u + sizeof(crb1->u) - long_len;
		inp = crb1->message + sizeof(crb1->message) - mod_len;
	} else if (long_len <= 128) {
		struct type50_crb2_msg *crb2 = ap_msg->message;
		memset(crb2, 0, sizeof(*crb2));
		ap_msg->length = sizeof(*crb2);
		crb2->header.msg_type_code = TYPE50_TYPE_CODE;
		crb2->header.msg_len = sizeof(*crb2);
		crb2->keyblock_type = TYPE50_CRB2_FMT;
		p = crb2->p + sizeof(crb2->p) - long_len;
		q = crb2->q + sizeof(crb2->q) - short_len;
		dp = crb2->dp + sizeof(crb2->dp) - long_len;
		dq = crb2->dq + sizeof(crb2->dq) - short_len;
		u = crb2->u + sizeof(crb2->u) - long_len;
		inp = crb2->message + sizeof(crb2->message) - mod_len;
	} else {
		/* long_len >= 256 */
		struct type50_crb3_msg *crb3 = ap_msg->message;
		memset(crb3, 0, sizeof(*crb3));
		ap_msg->length = sizeof(*crb3);
		crb3->header.msg_type_code = TYPE50_TYPE_CODE;
		crb3->header.msg_len = sizeof(*crb3);
		crb3->keyblock_type = TYPE50_CRB3_FMT;
		p = crb3->p + sizeof(crb3->p) - long_len;
		q = crb3->q + sizeof(crb3->q) - short_len;
		dp = crb3->dp + sizeof(crb3->dp) - long_len;
		dq = crb3->dq + sizeof(crb3->dq) - short_len;
		u = crb3->u + sizeof(crb3->u) - long_len;
		inp = crb3->message + sizeof(crb3->message) - mod_len;
	}

	if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
	    copy_from_user(q, crt->nq_prime, short_len) ||
	    copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
	    copy_from_user(dq, crt->bq_key, short_len) ||
	    copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
	    copy_from_user(inp, crt->inputdata, mod_len))
		return -EFAULT;
	return 0;
}

/**
 * Copy results from a type 80 reply message back to user space.
 *
 * @zdev: crypto device pointer
 * @reply: reply AP message.
 * @data: pointer to user output data
 * @length: size of user output data
 *
 * A short reply marks the device bad and returns -EAGAIN so the request
 * distributor retries elsewhere.
 *
 * Returns 0 on success or -EFAULT.
 */
static int convert_type80(struct zcrypt_device *zdev,
			  struct ap_message *reply,
			  char __user *outputdata,
			  unsigned int outputdatalength)
{
	struct type80_hdr *t80h = reply->message;
	unsigned char *data;

	if (t80h->len < sizeof(*t80h) + outputdatalength) {
		/* The result is too short, the CEX2A card may not do that.. */
		zdev->online = 0;
		return -EAGAIN;	/* repeat the request on a different device. */
	}
	if (zdev->user_space_type == ZCRYPT_CEX2A)
		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
	else
		BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
	/* The result is right-justified at the end of the reply. */
	data = reply->message + t80h->len - outputdatalength;
	if (copy_to_user(outputdata, data, outputdatalength))
		return -EFAULT;
	return 0;
}

/*
 * convert_response - dispatch on the reply type byte: type 80 replies
 * carry the result, types 82/88 carry an error; anything else marks the
 * device offline and asks for a retry elsewhere.
 */
static int convert_response(struct zcrypt_device *zdev,
			    struct ap_message *reply,
			    char __user *outputdata,
			    unsigned int outputdatalength)
{
	/* Response type byte is the second byte in the response. */
	switch (((unsigned char *) reply->message)[1]) {
	case TYPE82_RSP_CODE:
	case TYPE88_RSP_CODE:
		return convert_error(zdev, reply);
	case TYPE80_RSP_CODE:
		return convert_type80(zdev, reply,
				      outputdata, outputdatalength);
	default: /* Unknown response type, this should NEVER EVER happen */
		zdev->online = 0;
		return -EAGAIN;	/* repeat the request on a different device. */
	}
}

/**
 * This function is called from the AP bus code after a crypto request
 * "msg" has finished with the reply message "reply".
 * It is called from tasklet context.
 * @ap_dev: pointer to the AP device
 * @msg: pointer to the AP message
 * @reply: pointer to the AP reply message
 */
static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
				 struct ap_message *msg,
				 struct ap_message *reply)
{
	static struct error_hdr error_reply = {
		.type = TYPE82_RSP_CODE,
		.reply_code = REP82_ERROR_MACHINE_FAILURE,
	};
	struct type80_hdr *t80h;
	int length;

	/* Copy the reply message to the request message buffer. */
	if (IS_ERR(reply)) {
		/* Bus-level failure: synthesize a machine-failure error. */
		memcpy(msg->message, &error_reply, sizeof(error_reply));
		goto out;
	}
	t80h = reply->message;
	if (t80h->type == TYPE80_RSP_CODE) {
		/* Clamp the copy to the per-device maximum response size. */
		if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
			length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
		else
			length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
		memcpy(msg->message, reply->message, length);
	} else
		memcpy(msg->message, reply->message, sizeof error_reply);
out:
	/* Wake the submitter waiting in zcrypt_cex2a_modexpo{,_crt}(). */
	complete((struct completion *) msg->private);
}

static atomic_t zcrypt_step = ATOMIC_INIT(0);

/**
 * The request distributor calls this function if it picked the CEX2A
 * device to handle a modexpo request.
 * @zdev: pointer to zcrypt_device structure that identifies the
 *	  CEX2A device to the request distributor
 * @mex: pointer to the modexpo request buffer
 */
static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
				 struct ica_rsa_modexpo *mex)
{
	struct ap_message ap_msg;
	struct completion work;
	int rc;

	ap_init_message(&ap_msg);
	if (zdev->user_space_type == ZCRYPT_CEX2A)
		ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
	else
		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	/* Unique id: pid in the high half, a global sequence in the low. */
	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
				atomic_inc_return(&zcrypt_step);
	ap_msg.private = &work;
	rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
	if (rc)
		goto out_free;
	init_completion(&work);
	ap_queue_message(zdev->ap_dev, &ap_msg);
	rc = wait_for_completion_interruptible(&work);
	if (rc == 0)
		rc = convert_response(zdev, &ap_msg, mex->outputdata,
				      mex->outputdatalength);
	else
		/* Signal pending. */
		ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
	kfree(ap_msg.message);
	return rc;
}

/**
 * The request distributor calls this function if it picked the CEX2A
 * device to handle a modexpo_crt request.
 * @zdev: pointer to zcrypt_device structure that identifies the
 *	  CEX2A device to the request distributor
 * @crt: pointer to the modexpoc_crt request buffer
 */
static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
				     struct ica_rsa_modexpo_crt *crt)
{
	struct ap_message ap_msg;
	struct completion work;
	int rc;

	ap_init_message(&ap_msg);
	if (zdev->user_space_type == ZCRYPT_CEX2A)
		ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
	else
		ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
				atomic_inc_return(&zcrypt_step);
	ap_msg.private = &work;
	rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
	if (rc)
		goto out_free;
	init_completion(&work);
	ap_queue_message(zdev->ap_dev, &ap_msg);
	rc = wait_for_completion_interruptible(&work);
	if (rc == 0)
		rc = convert_response(zdev, &ap_msg, crt->outputdata,
				      crt->outputdatalength);
	else
		/* Signal pending. */
		ap_cancel_message(zdev->ap_dev, &ap_msg);
out_free:
	kfree(ap_msg.message);
	return rc;
}

/**
 * The crypto operations for a CEX2A card.
 */
static struct zcrypt_ops zcrypt_cex2a_ops = {
	.rsa_modexpo = zcrypt_cex2a_modexpo,
	.rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
};

/**
 * Probe function for CEX2A cards. It always accepts the AP device
 * since the bus_match already checked the hardware type.
 * @ap_dev: pointer to the AP device.
 */
static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev = NULL;
	int rc = 0;

	switch (ap_dev->device_type) {
	case AP_DEVICE_TYPE_CEX2A:
		zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
		if (!zdev)
			return -ENOMEM;
		zdev->user_space_type = ZCRYPT_CEX2A;
		zdev->type_string = "CEX2A";
		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
		zdev->short_crt = 1;
		zdev->speed_rating = CEX2A_SPEED_RATING;
		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
		break;
	case AP_DEVICE_TYPE_CEX3A:
		zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
		if (!zdev)
			return -ENOMEM;
		zdev->user_space_type = ZCRYPT_CEX3A;
		zdev->type_string = "CEX3A";
		/* Default to 2048-bit limits; upgrade below if the card
		 * advertises 4096-bit capability. */
		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
		if (ap_4096_commands_available(ap_dev->qid)) {
			zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
			zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
		}
		zdev->short_crt = 1;
		zdev->speed_rating = CEX3A_SPEED_RATING;
		break;
	}
	if (zdev != NULL) {
		zdev->ap_dev = ap_dev;
		zdev->ops = &zcrypt_cex2a_ops;
		zdev->online = 1;
		ap_dev->reply = &zdev->reply;
		ap_dev->private = zdev;
		rc = zcrypt_device_register(zdev);
	}
	if (rc) {
		ap_dev->private = NULL;
		zcrypt_device_free(zdev);
	}
	return rc;
}

/**
 * This is called to remove the extended CEX2A driver information
 * if an AP device is removed.
 */
static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
{
	struct zcrypt_device *zdev = ap_dev->private;

	zcrypt_device_unregister(zdev);
}

int __init zcrypt_cex2a_init(void)
{
	return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
}

void __exit zcrypt_cex2a_exit(void)
{
	ap_driver_unregister(&zcrypt_cex2a_driver);
}

module_init(zcrypt_cex2a_init);
module_exit(zcrypt_cex2a_exit);
gpl-2.0
mdalexca/marlin
drivers/isdn/hisax/enternow_pci.c
4807
11565
/* enternow_pci.c,v 0.99 2001/10/02
 *
 * enternow_pci.c       Card-specific routines for
 *                      Formula-n enter:now ISDN PCI ab
 *                      Gerdes AG Power ISDN PCI
 *                      Woerltronic SA 16 PCI
 *                      (based on HiSax driver by Karsten Keil)
 *
 * Author               Christoph Ersfeld <info@formula-n.de>
 *                      Formula-n Europe AG (www.formula-n.com)
 *                      previously Gerdes AG
 *
 *
 *                      This file is (c) under GNU PUBLIC LICENSE
 *
 * Notes:
 * This driver interfaces to netjet.c which performs B-channel
 * processing.
 *
 * Version 0.99 is the first release of this driver and there are
 * certainly a few bugs.
 * It isn't tested on linux 2.4 yet, so consider this code to be
 * beta.
 *
 * Please don't report me any malfunction without sending
 * (compressed) debug-logs.
 * It would be nearly impossible to retrace it.
 *
 * Log D-channel-processing as follows:
 *
 * 1. Load hisax with card-specific parameters, this example is for
 *    Formula-n enter:now ISDN PCI and compatible
 *    (e.g. Gerdes Power ISDN PCI)
 *
 *    modprobe hisax type=41 protocol=2 id=gerdes
 *
 *    if you choose another value for id, you need to modify the
 *    code below, too.
 *
 * 2. set debug-level
 *
 *    hisaxctrl gerdes 1 0x3ff
 *    hisaxctrl gerdes 11 0x4f
 *    cat /dev/isdnctrl >> ~/log &
 *
 * Please take also a look into /var/log/messages if there is
 * anything important concerning HISAX.
 *
 *
 * Credits:
 * Programming the driver for Formula-n enter:now ISDN PCI and
 * the necessary driver for the used Amd 7930 D-channel-controller
 * was sponsored by Formula-n Europe AG.
 * Thanks to Karsten Keil and Petr Novak, who gave me support in
 * Hisax-specific questions.
 * I want to say special thanks to Carl-Friedrich Braun, who had to
 * answer a lot of questions about generally ISDN and about handling
 * of the Amd-Chip.
 *
 */

#include "hisax.h"
#include "isac.h"
#include "isdnl1.h"
#include "amd7930_fn.h"
#include <linux/interrupt.h>
#include <linux/ppp_defs.h>
#include <linux/pci.h>
#include <linux/init.h>
#include "netjet.h"

static const char *enternow_pci_rev = "$Revision: 1.1.4.5 $";

/* for PowerISDN PCI */
#define TJ_AMD_IRQ 0x20
#define TJ_LED1 0x40
#define TJ_LED2 0x80

/* The window to [the] AMD [chip]...
 * From address hw.njet.base + TJ_AMD_PORT onwards, the AMD
 * maps [consecutive/multiple] 8 bits into the TigerJet I/O space
 * -> 0x01 of the AMD at hw.njet.base + 0C4 */
#define TJ_AMD_PORT 0xC0

/* *************************** I/O-Interface functions ************************************* */

/* cs->readisac, macro rByteAMD
 * Read one AMD 7930 register; registers 0-7 are mapped directly into the
 * TigerJet I/O window (4 bytes apart), higher registers are reached
 * indirectly via the command/data register pair. */
static unsigned char ReadByteAmd7930(struct IsdnCardState *cs, unsigned char offset)
{
	/* direct register */
	if (offset < 8)
		return (inb(cs->hw.njet.isac + 4 * offset));
	/* indirect register */
	else {
		outb(offset, cs->hw.njet.isac + 4 * AMD_CR);
		return (inb(cs->hw.njet.isac + 4 * AMD_DR));
	}
}

/* cs->writeisac, macro wByteAMD
 * Write one AMD 7930 register, using the same direct/indirect scheme as
 * ReadByteAmd7930() above. */
static void WriteByteAmd7930(struct IsdnCardState *cs, unsigned char offset, unsigned char value)
{
	/* direct register */
	if (offset < 8)
		outb(value, cs->hw.njet.isac + 4 * offset);
	/* indirect register */
	else {
		outb(offset, cs->hw.njet.isac + 4 * AMD_CR);
		outb(value, cs->hw.njet.isac + 4 * AMD_DR);
	}
}

/* Mask (val == 0) or unmask the AMD interrupt in the TigerJet IRQ mask
 * register. */
static void enpci_setIrqMask(struct IsdnCardState *cs, unsigned char val)
{
	if (!val)
		outb(0x00, cs->hw.njet.base + NETJET_IRQMASK1);
	else
		outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
}

/* B-channel register stubs: B-channel handling is done by DMA in
 * netjet.c, so these BC_Read_Reg/BC_Write_Reg hooks are dummies. */
static unsigned char dummyrr(struct IsdnCardState *cs, int chan, unsigned char off)
{
	return (5);
}

static void dummywr(struct IsdnCardState *cs, int chan, unsigned char off, unsigned char value)
{

}

/* ******************************************************************************** */

/* Pulse the board reset (which also resets the AMD), then restore the
 * LED/aux and IRQ-mask state.  The 20 ms delays bracket the reset pulse. */
static void reset_enpci(struct IsdnCardState *cs)
{
	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "enter:now PCI: reset");

	/* Reset on, (also for AMD) */
	cs->hw.njet.ctrl_reg = 0x07;
	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
	mdelay(20);
	/* Reset off */
	cs->hw.njet.ctrl_reg = 0x30;
	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
	/* 20ms delay */
	mdelay(20);
	cs->hw.njet.auxd = 0;  // LED-status
	cs->hw.njet.dmactrl = 0;
	outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
	outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
	outb(cs->hw.njet.auxd, cs->hw.njet.auxa);  // LED off
}

/* Card-control callback (cs->cardmsg): handles reset/release/init plus
 * TEI and B-channel assignment notifications, driving the two LEDs via
 * the aux data register. */
static int enpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;
	unsigned char *chan;

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "enter:now PCI: card_msg: 0x%04X", mt);

	switch (mt) {
	case CARD_RESET:
		spin_lock_irqsave(&cs->lock, flags);
		reset_enpci(cs);
		Amd7930_init(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case CARD_RELEASE:
		release_io_netjet(cs);
		break;
	case CARD_INIT:
		reset_enpci(cs);
		inittiger(cs);
		/* irq must be on here */
		Amd7930_init(cs);
		break;
	case CARD_TEST:
		break;
	case MDL_ASSIGN:
		/* TEI assigned, LED1 on */
		cs->hw.njet.auxd = TJ_AMD_IRQ << 1;
		outb(cs->hw.njet.auxd, cs->hw.njet.base + NETJET_AUXDATA);
		break;
	case MDL_REMOVE:
		/* TEI removed, LEDs off */
		cs->hw.njet.auxd = 0;
		outb(0x00, cs->hw.njet.base + NETJET_AUXDATA);
		break;
	case MDL_BC_ASSIGN:
		/* activate B-channel */
		chan = (unsigned char *)arg;

		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "enter:now PCI: assign phys. BC %d in AMD LMR1", *chan);

		cs->dc.amd7930.ph_command(cs, (cs->dc.amd7930.lmr1 | (*chan + 1)), "MDL_BC_ASSIGN");
		/* at least one b-channel in use, LED 2 on */
		cs->hw.njet.auxd |= TJ_AMD_IRQ << 2;
		outb(cs->hw.njet.auxd, cs->hw.njet.base + NETJET_AUXDATA);
		break;
	case MDL_BC_RELEASE:
		/* deactivate B-channel */
		chan = (unsigned char *)arg;

		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "enter:now PCI: release phys. BC %d in Amd LMR1", *chan);

		cs->dc.amd7930.ph_command(cs, (cs->dc.amd7930.lmr1 & ~(*chan + 1)), "MDL_BC_RELEASE");
		/* no b-channel active -> LED2 off */
		if (!(cs->dc.amd7930.lmr1 & 3)) {
			cs->hw.njet.auxd &= ~(TJ_AMD_IRQ << 2);
			outb(cs->hw.njet.auxd, cs->hw.njet.base + NETJET_AUXDATA);
		}
		break;
	default:
		break;

	}
	return (0);
}

/* Shared interrupt handler: dispatches AMD (D-channel) interrupts to
 * Amd7930_interrupt() and TigerJet DMA (B-channel) interrupts to
 * read_tiger()/write_tiger(). */
static irqreturn_t enpci_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	unsigned char s0val, s1val, ir;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	s1val = inb(cs->hw.njet.base + NETJET_IRQSTAT1);

	/* AMD threw an interrupt */
	/* NOTE(review): the AMD IRQ bit appears to be active-low here
	 * (interrupt taken when the bit is clear) — confirm against the
	 * TigerJet datasheet. */
	if (!(s1val & TJ_AMD_IRQ)) {
		/* read and clear interrupt-register */
		ir = ReadByteAmd7930(cs, 0x00);
		Amd7930_interrupt(cs, ir);
		s1val = 1;
	} else
		s1val = 0;
	s0val = inb(cs->hw.njet.base + NETJET_IRQSTAT0);
	if ((s0val | s1val) == 0) { // shared IRQ
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
	if (s0val)
		outb(s0val, cs->hw.njet.base + NETJET_IRQSTAT0);

	/* DMA-Interrupt: B-channel-stuff */
	/* set bits in sval to indicate which page is free */

	if (inl(cs->hw.njet.base + NETJET_DMA_WRITE_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_WRITE_IRQ))
		/* the 2nd write page is free */
		s0val = 0x08;
	else	/* the 1st write page is free */
		s0val = 0x04;
	if (inl(cs->hw.njet.base + NETJET_DMA_READ_ADR) <
	    inl(cs->hw.njet.base + NETJET_DMA_READ_IRQ))
		/* the 2nd read page is free */
		s0val = s0val | 0x02;
	else	/* the 1st read page is free */
		s0val = s0val | 0x01;
	if (s0val != cs->hw.njet.last_is0) /* we have a DMA interrupt */
	{
		/* FLG_LOCK_ATOMIC guards against re-entering the tiger DMA
		 * processing from a nested interrupt. */
		if (test_and_set_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags)) {
			spin_unlock_irqrestore(&cs->lock, flags);
			return IRQ_HANDLED;
		}
		cs->hw.njet.irqstat0 = s0val;
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_READ) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_READ))
			/* we have a read dma int */
			read_tiger(cs);
		if ((cs->hw.njet.irqstat0 & NETJET_IRQM0_WRITE) !=
		    (cs->hw.njet.last_is0 & NETJET_IRQM0_WRITE))
			/* we have a write dma int */
			write_tiger(cs);
		test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
	}
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}

/* Enable the PCI device, read IRQ and I/O base, and reject boards whose
 * subsystem IDs identify a different TigerJet-based card.
 * Returns 1 on success, 0 on failure (HiSax setup convention). */
static int en_pci_probe(struct pci_dev *dev_netjet, struct IsdnCardState *cs)
{
	if (pci_enable_device(dev_netjet))
		return (0);
	cs->irq = dev_netjet->irq;
	if (!cs->irq) {
		printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
		return (0);
	}
	cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
	if (!cs->hw.njet.base) {
		printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
		return (0);
	}
	/* checks Sub-Vendor ID because system crashes with Traverse-Card */
	if ((dev_netjet->subsystem_vendor != 0x55) ||
	    (dev_netjet->subsystem_device != 0x02)) {
		printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
		printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
		return (0);
	}
	return (1);
}

/* First-stage card state init: set up the AMD window address and pulse
 * the board reset (same register sequence as reset_enpci()). */
static void en_cs_init(struct IsdnCard *card, struct IsdnCardState *cs)
{
	cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
	cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD

	/* Reset an */
	cs->hw.njet.ctrl_reg = 0x07;  // geändert von 0xff
	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
	/* 20 ms Pause */
	mdelay(20);

	cs->hw.njet.ctrl_reg = 0x30;  /* Reset Off and status read clear */
	outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
	mdelay(10);

	cs->hw.njet.auxd = 0x00; // war 0xc0
	cs->hw.njet.dmactrl = 0;

	outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
	outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
	outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
}

/* Second-stage init: claim the I/O region, set up the AMD 7930 and wire
 * all the HiSax callback pointers.  Returns 1 on success, 0 on failure. */
static int en_cs_init_rest(struct IsdnCard *card, struct IsdnCardState *cs)
{
	const int bytecnt = 256;

	printk(KERN_INFO
	       "enter:now PCI: PCI card configured at 0x%lx IRQ %d\n",
	       cs->hw.njet.base, cs->irq);
	if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) {
		printk(KERN_WARNING
		       "HiSax: enter:now config port %lx-%lx already in use\n",
		       cs->hw.njet.base,
		       cs->hw.njet.base + bytecnt);
		return (0);
	}

	setup_Amd7930(cs);
	cs->hw.njet.last_is0 = 0;
	/* macro rByteAMD */
	cs->readisac = &ReadByteAmd7930;
	/* macro wByteAMD */
	cs->writeisac = &WriteByteAmd7930;
	cs->dc.amd7930.setIrqMask = &enpci_setIrqMask;

	cs->BC_Read_Reg = &dummyrr;
	cs->BC_Write_Reg = &dummywr;
	cs->BC_Send_Data = &netjet_fill_dma;
	cs->cardmsg = &enpci_card_msg;
	cs->irq_func = &enpci_interrupt;
	cs->irq_flags |= IRQF_SHARED;

	return (1);
}

static struct pci_dev *dev_netjet = NULL;

/* called by config.c
 * Top-level HiSax setup entry point: find the TigerJet PCI device,
 * probe and initialise it.  Returns 1 on success, 0 on failure. */
int setup_enternow_pci(struct IsdnCard *card)
{
	int ret;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif

	strcpy(tmp, enternow_pci_rev);
	printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_ENTERNOW)
		return (0);
	test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);

	for (;;)
	{
		if ((dev_netjet = hisax_find_pci_device(PCI_VENDOR_ID_TIGERJET,
							PCI_DEVICE_ID_TIGERJET_300,  dev_netjet))) {
			ret = en_pci_probe(dev_netjet, cs);
			if (!ret)
				return (0);
		} else {
			printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
			return (0);
		}

		en_cs_init(card, cs);
		break;
	}

	return en_cs_init_rest(card, cs);
}
gpl-2.0
RealVNC/Android-kernel-mako-NCM
drivers/net/ethernet/qlogic/qlge/qlge_main.c
4807
135228
/* * QLogic qlge NIC HBA Driver * Copyright (c) 2003-2008 QLogic Corporation * See LICENSE.qlge for copyright and licensing details. * Author: Linux qlge network device driver by * Ron Mercer <ron.mercer@qlogic.com> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/module.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/dmapool.h> #include <linux/mempool.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ipv6.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/prefetch.h> #include <net/ip6_checksum.h> #include "qlge.h" char qlge_driver_name[] = DRV_NAME; const char qlge_driver_version[] = DRV_VERSION; MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>"); MODULE_DESCRIPTION(DRV_STRING " "); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | /* NETIF_MSG_TIMER | */ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR | /* NETIF_MSG_TX_QUEUED | */ /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ /* NETIF_MSG_PKTDATA | */ NETIF_MSG_HW | NETIF_MSG_WOL | 0; static int debug = -1; /* defaults above */ module_param(debug, int, 0664); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define MSIX_IRQ 0 #define MSI_IRQ 1 #define LEG_IRQ 2 static int qlge_irq_type = MSIX_IRQ; module_param(qlge_irq_type, int, 0664); 
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. "
		 "Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. "
		 "Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	/* Write the set-request, then read back: the hardware leaves our
	 * bits set only if we actually got the semaphore.  Returns 0 when
	 * acquired, non-zero when contended. */
	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

/* Spin (with udelay back-off) until the hardware semaphore is acquired
 * or ~30 attempts have elapsed.  Returns 0 on success, -ETIMEDOUT. */
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

/* Release a hardware semaphore; the read-back flushes the posted write. */
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
		(bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
		PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		/* Each register read is the same handshake: wait for the
		 * index register to come ready, write the index with the
		 * read-strobe bits, wait for data ready, then read. */
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		/* CAM entries carry a third word (the routing/output word). */
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			status =
				ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
				ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		}
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	{
		/* Pack the 6-byte address into two 32-bit register words. */
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			(addr[4] << 8) | (addr[5]);

		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);

		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		break;
	}
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		u32 cam_output;
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower =
			(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			(addr[5]);

		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type);	/* type */
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type);	/* type */
		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
			   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
			   type);	/* type */
		/* This field should also include the queue id
		   and possibly the function id.  Right now we hardcode
		   the route field to NIC core.
		 */
		cam_output = (CAM_OUT_ROUTE_NIC |
			      (qdev->
			       func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
		if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
			cam_output |= CAM_OUT_RV;
		/* route to NIC core */
		ql_write32(qdev, MAC_ADDR_DATA, cam_output);
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	{
		u32 enable_bit = *((u32 *) &addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
		status =
			ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
			   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
			   type |	/* type */
			   enable_bit);	/* enable/disable */
		break;
	}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		/* Clearing is done by programming an all-zero address. */
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

/* Report carrier up and program our MAC so frames are routed to us. */
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

/* Report carrier down and clear our MAC from the CAM. */
void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
	{
		value = RT_IDX_DST_CAM_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_IP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q | /* dest */
			RT_IDX_TYPE_NICQ | /* type */
			(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
	{
		value = RT_IDX_DST_RSS |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	case 0:		/* Clear the E-bit on an entry. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(index << RT_IDX_IDX_SHIFT);/* index */
		break;
	}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

/* Globally enable the chip's interrupt line. */
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

/* Globally disable the chip's interrupt line. */
static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

/* Enable every completion interrupt, pre-charging irq_cnt so the
 * dec-and-test inside ql_enable_completion_interrupt() fires. */
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does a atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

/* Validate the flash image: check the 4-character signature and verify
 * that the 16-bit words sum to zero.  Returns 0 when valid. */
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return	status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

/* Read one 32-bit word from flash at 'offset' via the FLASH_ADDR /
 * FLASH_DATA indirect register pair. */
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

/* Read and validate the 8000-series flash parameter block, then extract
 * the MAC address (manufacturer or BOFM-modified) into the netdev. */
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* Read and validate the 8012-series flash parameter block and copy its
 * MAC address into the netdev. */
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs.
 */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	/* A 64-bit counter is exposed as two consecutive 32-bit
	 * XGMAC registers: low word at @reg, high word at @reg + 4.
	 */
	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo.
 */
	/* Turn on the TX/RX statistics counters. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status = ql_write_xgmac_reg(qdev, MAC_TX_PARAMS,
				    MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;

	status = ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

/* Size in bytes of one master page block used for large rx buffers. */
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	/* Advance the consumer index, wrapping at the ring length. */
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	/* One more slot is now free to be refilled. */
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

/* Consume the next large-buffer chunk and sync it for CPU access. */
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
			== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev, lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	/* Advance the consumer index, wrapping at the ring length. */
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	/* One more slot is now free to be refilled. */
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

/* Tell the hardware how far the completion queue has been consumed. */
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

/* Hand out the next chunk of the master rx page, allocating and
 * DMA-mapping a fresh page (possibly multi-order) when the previous
 * one is used up.  Returns 0 on success, -ENOMEM on allocation or
 * mapping failure.
 */
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			/* Don't leak the page when the mapping fails. */
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
*/ rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { rx_ring->pg_chunk.page = NULL; lbq_desc->p.pg_chunk.last_flag = 1; } else { rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; get_page(rx_ring->pg_chunk.page); lbq_desc->p.pg_chunk.last_flag = 0; } return 0; } /* Process (refill) a large buffer queue. */ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) { u32 clean_idx = rx_ring->lbq_clean_idx; u32 start_idx = clean_idx; struct bq_desc *lbq_desc; u64 map; int i; while (rx_ring->lbq_free_cnt > 32) { for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "lbq: try cleaning clean_idx = %d.\n", clean_idx); lbq_desc = &rx_ring->lbq[clean_idx]; if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { rx_ring->lbq_clean_idx = clean_idx; netif_err(qdev, ifup, qdev->ndev, "Could not get a page chunk, i=%d, clean_idx =%d .\n", i, clean_idx); return; } map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset; dma_unmap_addr_set(lbq_desc, mapaddr, map); dma_unmap_len_set(lbq_desc, maplen, rx_ring->lbq_buf_size); *lbq_desc->addr = cpu_to_le64(map); pci_dma_sync_single_for_device(qdev->pdev, map, rx_ring->lbq_buf_size, PCI_DMA_FROMDEVICE); clean_idx++; if (clean_idx == rx_ring->lbq_len) clean_idx = 0; } rx_ring->lbq_clean_idx = clean_idx; rx_ring->lbq_prod_idx += 16; if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) rx_ring->lbq_prod_idx = 0; rx_ring->lbq_free_cnt -= 16; } if (start_idx != clean_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "lbq: updating prod idx = %d.\n", rx_ring->lbq_prod_idx); ql_write_db_reg(rx_ring->lbq_prod_idx, rx_ring->lbq_prod_idx_db_reg); } } /* Process (refill) a small buffer queue. 
*/ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) { u32 clean_idx = rx_ring->sbq_clean_idx; u32 start_idx = clean_idx; struct bq_desc *sbq_desc; u64 map; int i; while (rx_ring->sbq_free_cnt > 16) { for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) { sbq_desc = &rx_ring->sbq[clean_idx]; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "sbq: try cleaning clean_idx = %d.\n", clean_idx); if (sbq_desc->p.skb == NULL) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "sbq: getting new skb for index %d.\n", sbq_desc->index); sbq_desc->p.skb = netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE); if (sbq_desc->p.skb == NULL) { netif_err(qdev, probe, qdev->ndev, "Couldn't get an skb.\n"); rx_ring->sbq_clean_idx = clean_idx; return; } skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); map = pci_map_single(qdev->pdev, sbq_desc->p.skb->data, rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(qdev->pdev, map)) { netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n"); rx_ring->sbq_clean_idx = clean_idx; dev_kfree_skb_any(sbq_desc->p.skb); sbq_desc->p.skb = NULL; return; } dma_unmap_addr_set(sbq_desc, mapaddr, map); dma_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size); *sbq_desc->addr = cpu_to_le64(map); } clean_idx++; if (clean_idx == rx_ring->sbq_len) clean_idx = 0; } rx_ring->sbq_clean_idx = clean_idx; rx_ring->sbq_prod_idx += 16; if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) rx_ring->sbq_prod_idx = 0; rx_ring->sbq_free_cnt -= 16; } if (start_idx != clean_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "sbq: updating prod idx = %d.\n", rx_ring->sbq_prod_idx); ql_write_db_reg(rx_ring->sbq_prod_idx, rx_ring->sbq_prod_idx_db_reg); } } static void ql_update_buffer_queues(struct ql_adapter *qdev, struct rx_ring *rx_ring) { ql_update_sbq(qdev, rx_ring); ql_update_lbq(qdev, rx_ring); } /* Unmaps tx buffers. 
Can be called from send() if a pci mapping * fails at some stage, or from the interrupt when a tx completes. */ static void ql_unmap_send(struct ql_adapter *qdev, struct tx_ring_desc *tx_ring_desc, int mapped) { int i; for (i = 0; i < mapped; i++) { if (i == 0 || (i == 7 && mapped > 7)) { /* * Unmap the skb->data area, or the * external sglist (AKA the Outbound * Address List (OAL)). * If its the zeroeth element, then it's * the skb->data area. If it's the 7th * element and there is more than 6 frags, * then its an OAL. */ if (i == 7) { netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, "unmapping OAL area.\n"); } pci_unmap_single(qdev->pdev, dma_unmap_addr(&tx_ring_desc->map[i], mapaddr), dma_unmap_len(&tx_ring_desc->map[i], maplen), PCI_DMA_TODEVICE); } else { netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, "unmapping frag %d.\n", i); pci_unmap_page(qdev->pdev, dma_unmap_addr(&tx_ring_desc->map[i], mapaddr), dma_unmap_len(&tx_ring_desc->map[i], maplen), PCI_DMA_TODEVICE); } } } /* Map the buffers for this transmit. This will return * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. */ static int ql_map_send(struct ql_adapter *qdev, struct ob_mac_iocb_req *mac_iocb_ptr, struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) { int len = skb_headlen(skb); dma_addr_t map; int frag_idx, err, map_idx = 0; struct tx_buf_desc *tbd = mac_iocb_ptr->tbd; int frag_cnt = skb_shinfo(skb)->nr_frags; if (frag_cnt) { netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, "frag_cnt = %d.\n", frag_cnt); } /* * Map the skb buffer first. 
*/ map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { netif_err(qdev, tx_queued, qdev->ndev, "PCI mapping failed with error: %d\n", err); return NETDEV_TX_BUSY; } tbd->len = cpu_to_le32(len); tbd->addr = cpu_to_le64(map); dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); map_idx++; /* * This loop fills the remainder of the 8 address descriptors * in the IOCB. If there are more than 7 fragments, then the * eighth address desc will point to an external list (OAL). * When this happens, the remainder of the frags will be stored * in this list. */ for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; tbd++; if (frag_idx == 6 && frag_cnt > 7) { /* Let's tack on an sglist. * Our control block will now * look like this: * iocb->seg[0] = skb->data * iocb->seg[1] = frag[0] * iocb->seg[2] = frag[1] * iocb->seg[3] = frag[2] * iocb->seg[4] = frag[3] * iocb->seg[5] = frag[4] * iocb->seg[6] = frag[5] * iocb->seg[7] = ptr to OAL (external sglist) * oal->seg[0] = frag[6] * oal->seg[1] = frag[7] * oal->seg[2] = frag[8] * oal->seg[3] = frag[9] * oal->seg[4] = frag[10] * etc... */ /* Tack on the OAL in the eighth segment of IOCB. */ map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, sizeof(struct oal), PCI_DMA_TODEVICE); err = pci_dma_mapping_error(qdev->pdev, map); if (err) { netif_err(qdev, tx_queued, qdev->ndev, "PCI mapping outbound address list with error: %d\n", err); goto map_error; } tbd->addr = cpu_to_le64(map); /* * The length is the number of fragments * that remain to be mapped times the length * of our sglist (OAL). 
*/ tbd->len = cpu_to_le32((sizeof(struct tx_buf_desc) * (frag_cnt - frag_idx)) | TX_DESC_C); dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, sizeof(struct oal)); tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; map_idx++; } map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); err = dma_mapping_error(&qdev->pdev->dev, map); if (err) { netif_err(qdev, tx_queued, qdev->ndev, "PCI mapping frags failed with error: %d.\n", err); goto map_error; } tbd->addr = cpu_to_le64(map); tbd->len = cpu_to_le32(skb_frag_size(frag)); dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, skb_frag_size(frag)); } /* Save the number of segments we've mapped. */ tx_ring_desc->map_cnt = map_idx; /* Terminate the last segment. */ tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); return NETDEV_TX_OK; map_error: /* * If the first frag mapping failed, then i will be zero. * This causes the unmap of the skb->data area. Otherwise * we pass in the number of frags that mapped successfully * so they can be umapped. */ ql_unmap_send(qdev, tx_ring_desc, map_idx); return NETDEV_TX_BUSY; } /* Process an inbound completion from an rx ring. 
*/ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp, u32 length, u16 vlan_id) { struct sk_buff *skb; struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; napi->dev = qdev->ndev; skb = napi_get_frags(napi); if (!skb) { netif_err(qdev, drv, qdev->ndev, "Couldn't get an skb, exiting.\n"); rx_ring->rx_dropped++; put_page(lbq_desc->p.pg_chunk.page); return; } prefetch(lbq_desc->p.pg_chunk.va); __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, lbq_desc->p.pg_chunk.page, lbq_desc->p.pg_chunk.offset, length); skb->len += length; skb->data_len += length; skb->truesize += length; skb_shinfo(skb)->nr_frags++; rx_ring->rx_packets++; rx_ring->rx_bytes += length; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) __vlan_hwaccel_put_tag(skb, vlan_id); napi_gro_frags(napi); } /* Process an inbound completion from an rx ring. */ static void ql_process_mac_rx_page(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp, u32 length, u16 vlan_id) { struct net_device *ndev = qdev->ndev; struct sk_buff *skb = NULL; void *addr; struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); struct napi_struct *napi = &rx_ring->napi; skb = netdev_alloc_skb(ndev, length); if (!skb) { netif_err(qdev, drv, qdev->ndev, "Couldn't get an skb, need to unwind!.\n"); rx_ring->rx_dropped++; put_page(lbq_desc->p.pg_chunk.page); return; } addr = lbq_desc->p.pg_chunk.va; prefetch(addr); /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); rx_ring->rx_errors++; goto err_out; } /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. 
*/ if (skb->len > ndev->mtu + ETH_HLEN) { netif_err(qdev, drv, qdev->ndev, "Segment too small, dropping.\n"); rx_ring->rx_dropped++; goto err_out; } memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, lbq_desc->p.pg_chunk.offset+ETH_HLEN, length-ETH_HLEN); skb->len += length-ETH_HLEN; skb->data_len += length-ETH_HLEN; skb->truesize += length-ETH_HLEN; rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); if ((ndev->features & NETIF_F_RXCSUM) && !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { /* TCP frame. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); skb->ip_summed = CHECKSUM_UNNECESSARY; } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) ((u8 *)addr + ETH_HLEN); if (!(iph->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "UDP checksum done!\n"); } } } skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) __vlan_hwaccel_put_tag(skb, vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(napi, skb); else netif_receive_skb(skb); return; err_out: dev_kfree_skb_any(skb); put_page(lbq_desc->p.pg_chunk.page); } /* Process an inbound completion from an rx ring. 
*/ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp, u32 length, u16 vlan_id) { struct net_device *ndev = qdev->ndev; struct sk_buff *skb = NULL; struct sk_buff *new_skb = NULL; struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); skb = sbq_desc->p.skb; /* Allocate new_skb and copy */ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); if (new_skb == NULL) { netif_err(qdev, probe, qdev->ndev, "No skb available, drop the packet.\n"); rx_ring->rx_dropped++; return; } skb_reserve(new_skb, NET_IP_ALIGN); memcpy(skb_put(new_skb, length), skb->data, length); skb = new_skb; /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); dev_kfree_skb_any(skb); rx_ring->rx_errors++; return; } /* loopback self test for ethtool */ if (test_bit(QL_SELFTEST, &qdev->flags)) { ql_check_lb_frame(qdev, skb); dev_kfree_skb_any(skb); return; } /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. */ if (skb->len > ndev->mtu + ETH_HLEN) { dev_kfree_skb_any(skb); rx_ring->rx_dropped++; return; } prefetch(skb->data); skb->dev = ndev; if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_HASH ? "Hash" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_REG ? "Registered" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); } if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Promiscuous Packet.\n"); rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); /* If rx checksum is on, and there are no * csum or frame errors. 
*/ if ((ndev->features & NETIF_F_RXCSUM) && !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { /* TCP frame. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); skb->ip_summed = CHECKSUM_UNNECESSARY; } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) skb->data; if (!(iph->frag_off & ntohs(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "UDP checksum done!\n"); } } } skb_record_rx_queue(skb, rx_ring->cq_id); if (vlan_id != 0xffff) __vlan_hwaccel_put_tag(skb, vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ring->napi, skb); else netif_receive_skb(skb); } static void ql_realign_skb(struct sk_buff *skb, int len) { void *temp_addr = skb->data; /* Undo the skb_reserve(skb,32) we did before * giving to hardware, and realign data on * a 2-byte boundary. */ skb->data -= QLGE_SB_PAD - NET_IP_ALIGN; skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN; skb_copy_to_linear_data(skb, temp_addr, (unsigned int)len); } /* * This function builds an skb for the given inbound * completion. It will be rewritten for readability in the near * future, but for not it works well. */ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp) { struct bq_desc *lbq_desc; struct bq_desc *sbq_desc; struct sk_buff *skb = NULL; u32 length = le32_to_cpu(ib_mac_rsp->data_len); u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); /* * Handle the header buffer if present. */ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Header of %d bytes in small buffer.\n", hdr_len); /* * Headers fit nicely into a small buffer. 
*/ sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); skb = sbq_desc->p.skb; ql_realign_skb(skb, hdr_len); skb_put(skb, hdr_len); sbq_desc->p.skb = NULL; } /* * Handle the data buffer(s). */ if (unlikely(!length)) { /* Is there data too? */ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "No Data buffer in this packet.\n"); return skb; } if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Headers in small, data of %d bytes in small, combine them.\n", length); /* * Data is less than small buffer size so it's * stuffed in a small buffer. * For this case we append the data * from the "data" small buffer to the "header" small * buffer. */ sbq_desc = ql_get_curr_sbuf(rx_ring); pci_dma_sync_single_for_cpu(qdev->pdev, dma_unmap_addr (sbq_desc, mapaddr), dma_unmap_len (sbq_desc, maplen), PCI_DMA_FROMDEVICE); memcpy(skb_put(skb, length), sbq_desc->p.skb->data, length); pci_dma_sync_single_for_device(qdev->pdev, dma_unmap_addr (sbq_desc, mapaddr), dma_unmap_len (sbq_desc, maplen), PCI_DMA_FROMDEVICE); } else { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes in a single small buffer.\n", length); sbq_desc = ql_get_curr_sbuf(rx_ring); skb = sbq_desc->p.skb; ql_realign_skb(skb, length); skb_put(skb, length); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); sbq_desc->p.skb = NULL; } } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Header in small, %d bytes in large. Chain large to small!\n", length); /* * The data is in a single large buffer. We * chain it to the header buffer's skb and let * it rip. 
*/ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Chaining page at offset = %d, for %d bytes to skb.\n", lbq_desc->p.pg_chunk.offset, length); skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, lbq_desc->p.pg_chunk.offset, length); skb->len += length; skb->data_len += length; skb->truesize += length; } else { /* * The headers and data are in a single large buffer. We * copy it to a new skb and let it go. This can happen with * jumbo mtu on a non-TCP/UDP frame. */ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); skb = netdev_alloc_skb(qdev->ndev, length); if (skb == NULL) { netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, "No skb available, drop the packet.\n"); return NULL; } pci_unmap_page(qdev->pdev, dma_unmap_addr(lbq_desc, mapaddr), dma_unmap_len(lbq_desc, maplen), PCI_DMA_FROMDEVICE); skb_reserve(skb, NET_IP_ALIGN); netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, lbq_desc->p.pg_chunk.offset, length); skb->len += length; skb->data_len += length; skb->truesize += length; length -= length; __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? VLAN_ETH_HLEN : ETH_HLEN); } } else { /* * The data is in a chain of large buffers * pointed to by a small buffer. We loop * thru and chain them to the our small header * buffer's skb. * frags: There are 18 max frags and our small * buffer will hold 32 of them. The thing is, * we'll use 3 max for our 9000 byte jumbo * frames. If the MTU goes up we could * eventually be in trouble. */ int size, i = 0; sbq_desc = ql_get_curr_sbuf(rx_ring); pci_unmap_single(qdev->pdev, dma_unmap_addr(sbq_desc, mapaddr), dma_unmap_len(sbq_desc, maplen), PCI_DMA_FROMDEVICE); if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { /* * This is an non TCP/UDP IP frame, so * the headers aren't split into a small * buffer. 
We have to use the small buffer * that contains our sg list as our skb to * send upstairs. Copy the sg list here to * a local buffer and use it to find the * pages to chain. */ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%d bytes of headers & data in chain of large.\n", length); skb = sbq_desc->p.skb; sbq_desc->p.skb = NULL; skb_reserve(skb, NET_IP_ALIGN); } while (length > 0) { lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); size = (length < rx_ring->lbq_buf_size) ? length : rx_ring->lbq_buf_size; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Adding page %d to skb for %d bytes.\n", i, size); skb_fill_page_desc(skb, i, lbq_desc->p.pg_chunk.page, lbq_desc->p.pg_chunk.offset, size); skb->len += size; skb->data_len += size; skb->truesize += size; length -= size; i++; } __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? VLAN_ETH_HLEN : ETH_HLEN); } return skb; } /* Process an inbound completion from an rx ring. */ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp, u16 vlan_id) { struct net_device *ndev = qdev->ndev; struct sk_buff *skb = NULL; QL_DUMP_IB_MAC_RSP(ib_mac_rsp); skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); if (unlikely(!skb)) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "No skb available, drop packet.\n"); rx_ring->rx_dropped++; return; } /* Frame error, so drop the packet. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { netif_info(qdev, drv, qdev->ndev, "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); dev_kfree_skb_any(skb); rx_ring->rx_errors++; return; } /* The max framesize filter on this chip is set higher than * MTU since FCoE uses 2k frames. 
*/ if (skb->len > ndev->mtu + ETH_HLEN) { dev_kfree_skb_any(skb); rx_ring->rx_dropped++; return; } /* loopback self test for ethtool */ if (test_bit(QL_SELFTEST, &qdev->flags)) { ql_check_lb_frame(qdev, skb); dev_kfree_skb_any(skb); return; } prefetch(skb->data); skb->dev = ndev; if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_HASH ? "Hash" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_REG ? "Registered" : (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); rx_ring->rx_multicast++; } if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "Promiscuous Packet.\n"); } skb->protocol = eth_type_trans(skb, ndev); skb_checksum_none_assert(skb); /* If rx checksum is on, and there are no * csum or frame errors. */ if ((ndev->features & NETIF_F_RXCSUM) && !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { /* TCP frame. */ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); skb->ip_summed = CHECKSUM_UNNECESSARY; } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { /* Unfragmented ipv4 UDP frame. */ struct iphdr *iph = (struct iphdr *) skb->data; if (!(iph->frag_off & ntohs(IP_MF|IP_OFFSET))) { skb->ip_summed = CHECKSUM_UNNECESSARY; netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "TCP checksum done!\n"); } } } rx_ring->rx_packets++; rx_ring->rx_bytes += skb->len; skb_record_rx_queue(skb, rx_ring->cq_id); if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) __vlan_hwaccel_put_tag(skb, vlan_id); if (skb->ip_summed == CHECKSUM_UNNECESSARY) napi_gro_receive(&rx_ring->napi, skb); else netif_receive_skb(skb); } /* Process an inbound completion from an rx ring. 
*/ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, struct rx_ring *rx_ring, struct ib_mac_iocb_rsp *ib_mac_rsp) { u32 length = le32_to_cpu(ib_mac_rsp->data_len); u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? ((le16_to_cpu(ib_mac_rsp->vlan_id) & IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; QL_DUMP_IB_MAC_RSP(ib_mac_rsp); if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { /* The data and headers are split into * separate buffers. */ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, vlan_id); } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { /* The data fit in a single small buffer. * Allocate a new skb, copy the data and * return the buffer to the free pool. */ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length, vlan_id); } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) && !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) { /* TCP packet in a page chunk that's been checksummed. * Tack it on to our GRO skb and let it go. */ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length, vlan_id); } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { /* Non-TCP packet in a page chunk. Allocate an * skb, tack it on frags, and send it up. */ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length, vlan_id); } else { /* Non-TCP/UDP large frames that span multiple buffers * can be processed corrrectly by the split frame logic. */ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, vlan_id); } return (unsigned long)length; } /* Process an outbound completion from an rx ring. 
 */
/* Reclaim one completed transmit: unmap its DMA segments, update byte and
 * packet counters, free the skb, log any error flags the chip reported,
 * and return the slot to the tx ring's free count.
 */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		/* NOTE(review): OB_MAC_IOCB_RSP_P is tested in the mask
		 * above but has no matching message here - confirm whether
		 * that is intentional.
		 */
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor.
 */
/* Take the link down and schedule the MPI (firmware) reset worker. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

/* Take the link down, mask interrupts, mark the adapter as down and in
 * fatal-error recovery, then schedule the ASIC reset worker.
 */
void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

/* Handle an async-event IOCB from the chip: log the event and queue the
 * appropriate (firmware or ASIC) error recovery.
 */
static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev, "PCI error occurred when reading "
			   "anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

/* Drain TX completions from an outbound completion queue and wake the
 * matching tx subqueue if it was stopped and is now at least 25% free.
 * Returns the number of completions processed.
 */
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue.
	 */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		/* Ensure the IOCB is read after the producer index. */
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}

/* Drain up to @budget inbound completions (frames and async events) from
 * an RSS completion queue, then replenish the buffer queues and tell the
 * chip how far we have consumed.  Returns the number processed.
 */
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		/* Ensure the IOCB is read after the producer index. */
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}

/* NAPI poll routine for an MSI-X vector: service the TX completion rings
 * mapped to this vector first, then its RSS (inbound) ring.
 */
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first.  They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
					trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
					rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	/* Only re-arm the interrupt when the full budget was not consumed. */
	if (work_done < budget) {
		napi_complete(napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

/* Program the chip's receive VLAN handling to match the RX VLAN-accel
 * feature flag.
 */
static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

static netdev_features_t qlge_fix_features(struct net_device *ndev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

/* Apply a feature change; only the VLAN RX accel bit needs hardware work. */
static int qlge_set_features(struct net_device *ndev,
	netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		qlge_vlan_mode(ndev, features);

	return 0;
}

/* Enable one VLAN id in the chip's MAC address/VLAN table.
 * Caller must hold the MAC_ADDR semaphore.
 */
static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = MAC_ADDR_E;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init vlan address.\n");
	return err;
}

static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_add_vid(qdev, vid);
	set_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

/* Disable one VLAN id (enable_bit == 0 clears the entry).
 * Caller must hold the MAC_ADDR semaphore.
 */
static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
	u32 enable_bit = 0;
	int err;

	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
				  MAC_ADDR_TYPE_VLAN, vid);
	if (err)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to clear vlan address.\n");
	return err;
}

static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;
	int err;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	err = __qlge_vlan_rx_kill_vid(qdev, vid);
	clear_bit(vid, qdev->active_vlans);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);

	return err;
}

/* Re-program every VLAN id recorded in active_vlans (used after reset). */
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
	int status;
	u16 vid;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return;

	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
		__qlge_vlan_rx_add_vid(qdev, vid);

	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}

/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	spin_lock(&qdev->hw_lock);
	/* irq_cnt != 0 means the line is shared and not currently ours. */
	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "Shared Interrupt, Not ours!\n");
		spin_unlock(&qdev->hw_lock);
		return IRQ_NONE;
	}
	spin_unlock(&qdev->hw_lock);

	var = ql_disable_completion_interrupt(qdev, intr_context->intr);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. "
			   "Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass.  Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		napi_schedule(&rx_ring->napi);
		work_done++;
	}
	ql_enable_completion_interrupt(qdev, intr_context->intr);
	return work_done ?
IRQ_HANDLED : IRQ_NONE; } static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) { if (skb_is_gso(skb)) { int err; if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); mac_iocb_ptr->total_hdrs_len = cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); mac_iocb_ptr->net_trans_offset = cpu_to_le16(skb_network_offset(skb) | skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; if (likely(skb->protocol == htons(ETH_P_IP))) { struct iphdr *iph = ip_hdr(skb); iph->check = 0; mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); } else if (skb->protocol == htons(ETH_P_IPV6)) { mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } return 1; } return 0; } static void ql_hw_csum_setup(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) { int len; struct iphdr *iph = ip_hdr(skb); __sum16 *check; mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); mac_iocb_ptr->net_trans_offset = cpu_to_le16(skb_network_offset(skb) | skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; len = (ntohs(iph->tot_len) - (iph->ihl << 2)); if (likely(iph->protocol == IPPROTO_TCP)) { check = &(tcp_hdr(skb)->check); mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; mac_iocb_ptr->total_hdrs_len = cpu_to_le16(skb_transport_offset(skb) + (tcp_hdr(skb)->doff << 2)); } else { check = &(udp_hdr(skb)->check); mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; mac_iocb_ptr->total_hdrs_len = 
cpu_to_le16(skb_transport_offset(skb) + sizeof(struct udphdr)); } *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, len, iph->protocol, 0); } static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) { struct tx_ring_desc *tx_ring_desc; struct ob_mac_iocb_req *mac_iocb_ptr; struct ql_adapter *qdev = netdev_priv(ndev); int tso; struct tx_ring *tx_ring; u32 tx_ring_idx = (u32) skb->queue_mapping; tx_ring = &qdev->tx_ring[tx_ring_idx]; if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { netif_info(qdev, tx_queued, qdev->ndev, "%s: shutting down tx queue %d du to lack of resources.\n", __func__, tx_ring_idx); netif_stop_subqueue(ndev, tx_ring->wq_id); atomic_inc(&tx_ring->queue_stopped); tx_ring->tx_errors++; return NETDEV_TX_BUSY; } tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; mac_iocb_ptr = tx_ring_desc->queue_entry; memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; mac_iocb_ptr->tid = tx_ring_desc->index; /* We use the upper 32-bits to store the tx queue for this IO. * When we get the completion we can use it to establish the context. 
*/ mac_iocb_ptr->txq_idx = tx_ring_idx; tx_ring_desc->skb = skb; mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); if (vlan_tx_tag_present(skb)) { netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); } tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { ql_hw_csum_setup(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); } if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) { netif_err(qdev, tx_queued, qdev->ndev, "Could not map the segments.\n"); tx_ring->tx_errors++; return NETDEV_TX_BUSY; } QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); tx_ring->prod_idx++; if (tx_ring->prod_idx == tx_ring->wq_len) tx_ring->prod_idx = 0; wmb(); ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, "tx queued, slot %d, len %d\n", tx_ring->prod_idx, skb->len); atomic_dec(&tx_ring->tx_count); return NETDEV_TX_OK; } static void ql_free_shadow_space(struct ql_adapter *qdev) { if (qdev->rx_ring_shadow_reg_area) { pci_free_consistent(qdev->pdev, PAGE_SIZE, qdev->rx_ring_shadow_reg_area, qdev->rx_ring_shadow_reg_dma); qdev->rx_ring_shadow_reg_area = NULL; } if (qdev->tx_ring_shadow_reg_area) { pci_free_consistent(qdev->pdev, PAGE_SIZE, qdev->tx_ring_shadow_reg_area, qdev->tx_ring_shadow_reg_dma); qdev->tx_ring_shadow_reg_area = NULL; } } static int ql_alloc_shadow_space(struct ql_adapter *qdev) { qdev->rx_ring_shadow_reg_area = pci_alloc_consistent(qdev->pdev, PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); if (qdev->rx_ring_shadow_reg_area == NULL) { netif_err(qdev, ifup, qdev->ndev, "Allocation of RX shadow space failed.\n"); return -ENOMEM; } memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); qdev->tx_ring_shadow_reg_area = 
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	/* Roll back the RX shadow page allocated above. */
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

/* Link every tx descriptor to its IOCB slot and reset the ring's
 * free count and stopped state.
 */
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;

	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

/* Free the DMA work queue and the descriptor shadow array of a tx ring. */
static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

/* Allocate the DMA work queue (must meet WQ_ADDR_ALIGN) and the
 * descriptor shadow array for a tx ring.
 */
static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}

/* Release the page chunks still held between curr and clean indices,
 * unmapping a chunk's backing block when its last_flag is set.
 */
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;

	}
}

/* Unmap and free every skb still attached to the small buffer queue. */
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(sbq_desc, mapaddr),
					 dma_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

/* Refill the buffer queues of every non-TX completion ring. */
static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}

/* Point each large-buffer descriptor at its queue entry address. */
static void ql_init_lbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

/* Point each small-buffer descriptor at its queue entry address. */
static void ql_init_sbq_ring(struct ql_adapter *qdev,
				struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len *
	       sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}

/* Free the completion queue and both buffer queues (DMA memory and
 * control blocks) of one rx ring.
 */
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{

	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	/* Unwind every allocation made so far for this ring. */
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}

/* Unmap and free any skbs left on the tx rings (e.g. at ifdown). */
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

/* Free all tx/rx ring resources and the shadow register pages. */
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

/* Allocate shadow space plus every rx and tx ring; on any failure, free
 * everything allocated so far.
 */
static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such.
 */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	/* A length of 65536 is encoded as 0 in the 16-bit length field. */
	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ?
			0 : (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

/* Set up one tx ring's doorbell/shadow registers, build its work queue
 * init control block (wqicb), and load it into the chip.
 */
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
*/ tx_ring->cnsmr_idx_sh_reg = shadow_reg; tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); wqicb->flags = cpu_to_le16(Q_FLAGS_LC | Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); wqicb->rid = 0; wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); ql_init_tx_ring(qdev, tx_ring); err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, (u16) tx_ring->wq_id); if (err) { netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); return err; } return err; } static void ql_disable_msix(struct ql_adapter *qdev) { if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { pci_disable_msix(qdev->pdev); clear_bit(QL_MSIX_ENABLED, &qdev->flags); kfree(qdev->msi_x_entry); qdev->msi_x_entry = NULL; } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { pci_disable_msi(qdev->pdev); clear_bit(QL_MSI_ENABLED, &qdev->flags); } } /* We start by trying to get the number of vectors * stored in qdev->intr_count. If we don't get that * many then we reduce the count and try again. */ static void ql_enable_msix(struct ql_adapter *qdev) { int i, err; /* Get the MSIX vectors. */ if (qlge_irq_type == MSIX_IRQ) { /* Try to alloc space for the msix struct, * if it fails then go to MSI/legacy. */ qdev->msi_x_entry = kcalloc(qdev->intr_count, sizeof(struct msix_entry), GFP_KERNEL); if (!qdev->msi_x_entry) { qlge_irq_type = MSI_IRQ; goto msi; } for (i = 0; i < qdev->intr_count; i++) qdev->msi_x_entry[i].entry = i; /* Loop to get our vectors. We start with * what we want and settle for what we get. 
*/ do { err = pci_enable_msix(qdev->pdev, qdev->msi_x_entry, qdev->intr_count); if (err > 0) qdev->intr_count = err; } while (err > 0); if (err < 0) { kfree(qdev->msi_x_entry); qdev->msi_x_entry = NULL; netif_warn(qdev, ifup, qdev->ndev, "MSI-X Enable failed, trying MSI.\n"); qdev->intr_count = 1; qlge_irq_type = MSI_IRQ; } else if (err == 0) { set_bit(QL_MSIX_ENABLED, &qdev->flags); netif_info(qdev, ifup, qdev->ndev, "MSI-X Enabled, got %d vectors.\n", qdev->intr_count); return; } } msi: qdev->intr_count = 1; if (qlge_irq_type == MSI_IRQ) { if (!pci_enable_msi(qdev->pdev)) { set_bit(QL_MSI_ENABLED, &qdev->flags); netif_info(qdev, ifup, qdev->ndev, "Running with MSI interrupts.\n"); return; } } qlge_irq_type = LEG_IRQ; netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Running with legacy interrupts.\n"); } /* Each vector services 1 RSS ring and and 1 or more * TX completion rings. This function loops through * the TX completion rings and assigns the vector that * will service it. An example would be if there are * 2 vectors (so 2 RSS rings) and 8 TX completion rings. * This would mean that vector 0 would service RSS ring 0 * and TX completion rings 0,1,2 and 3. Vector 1 would * service RSS ring 1 and TX completion rings 4,5,6 and 7. */ static void ql_set_tx_vect(struct ql_adapter *qdev) { int i, j, vect; u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { /* Assign irq vectors to TX rx_rings.*/ for (vect = 0, j = 0, i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { if (j == tx_rings_per_vector) { vect++; j = 0; } qdev->rx_ring[i].irq = vect; j++; } } else { /* For single vector all rings have an irq * of zero. */ for (i = 0; i < qdev->rx_ring_count; i++) qdev->rx_ring[i].irq = 0; } } /* Set the interrupt mask for this vector. Each vector * will service 1 RSS ring and 1 or more TX completion * rings. This function sets up a bit mask per vector * that indicates which rings it services. 
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			(1 << qdev->rx_ring[qdev->rss_ring_count +
			(vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vectors enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vectors enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services.  In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
*/ ql_set_tx_vect(qdev); } static void ql_free_irq(struct ql_adapter *qdev) { int i; struct intr_context *intr_context = &qdev->intr_context[0]; for (i = 0; i < qdev->intr_count; i++, intr_context++) { if (intr_context->hooked) { if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { free_irq(qdev->msi_x_entry[i].vector, &qdev->rx_ring[i]); } else { free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); } } } ql_disable_msix(qdev); } static int ql_request_irq(struct ql_adapter *qdev) { int i; int status = 0; struct pci_dev *pdev = qdev->pdev; struct intr_context *intr_context = &qdev->intr_context[0]; ql_resolve_queues_to_irqs(qdev); for (i = 0; i < qdev->intr_count; i++, intr_context++) { atomic_set(&intr_context->irq_cnt, 0); if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { status = request_irq(qdev->msi_x_entry[i].vector, intr_context->handler, 0, intr_context->name, &qdev->rx_ring[i]); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed request for MSIX interrupt %d.\n", i); goto err_irq; } } else { netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "trying msi or legacy interrupts.\n"); netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "%s: irq = %d.\n", __func__, pdev->irq); netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "%s: context->name = %s.\n", __func__, intr_context->name); netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "%s: dev_id = 0x%p.\n", __func__, &qdev->rx_ring[0]); status = request_irq(pdev->irq, qlge_isr, test_bit(QL_MSI_ENABLED, &qdev-> flags) ? 0 : IRQF_SHARED, intr_context->name, &qdev->rx_ring[0]); if (status) goto err_irq; netif_err(qdev, ifup, qdev->ndev, "Hooked intr %d, queue type %s, with name %s.\n", i, qdev->rx_ring[0].type == DEFAULT_Q ? "DEFAULT_Q" : qdev->rx_ring[0].type == TX_Q ? "TX_Q" : qdev->rx_ring[0].type == RX_Q ? 
"RX_Q" : "", intr_context->name); } intr_context->hooked = 1; } return status; err_irq: netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); ql_free_irq(qdev); return status; } static int ql_start_rss(struct ql_adapter *qdev) { static const u8 init_hash_seed[] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; struct ricb *ricb = &qdev->ricb; int status = 0; int i; u8 *hash_id = (u8 *) ricb->hash_cq_id; memset((void *)ricb, 0, sizeof(*ricb)); ricb->base_cq = RSS_L4K; ricb->flags = (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); ricb->mask = cpu_to_le16((u16)(0x3ff)); /* * Fill out the Indirection Table. */ for (i = 0; i < 1024; i++) hash_id[i] = (i & (qdev->rss_ring_count - 1)); memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); return status; } return status; } static int ql_clear_routing_entries(struct ql_adapter *qdev) { int i, status = 0; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; /* Clear all the entries in the routing table. */ for (i = 0; i < 16; i++) { status = ql_set_routing_reg(qdev, i, 0, 0); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init routing register for CAM packets.\n"); break; } } ql_sem_unlock(qdev, SEM_RT_IDX_MASK); return status; } /* Initialize the frame-to-queue routing. */ static int ql_route_initialize(struct ql_adapter *qdev) { int status = 0; /* Clear all the entries in the routing table. 
*/ status = ql_clear_routing_entries(qdev); if (status) return status; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return status; status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, RT_IDX_IP_CSUM_ERR, 1); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init routing register " "for IP CSUM error packets.\n"); goto exit; } status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, RT_IDX_TU_CSUM_ERR, 1); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init routing register " "for TCP/UDP CSUM error packets.\n"); goto exit; } status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init routing register for broadcast packets.\n"); goto exit; } /* If we have more than one inbound queue, then turn on RSS in the * routing block. */ if (qdev->rss_ring_count > 1) { status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, RT_IDX_RSS_MATCH, 1); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init routing register for MATCH RSS packets.\n"); goto exit; } } status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, RT_IDX_CAM_HIT, 1); if (status) netif_err(qdev, ifup, qdev->ndev, "Failed to init routing register for CAM packets.\n"); exit: ql_sem_unlock(qdev, SEM_RT_IDX_MASK); return status; } int ql_cam_route_initialize(struct ql_adapter *qdev) { int status, set; /* If check if the link is up and use to * determine if we are setting or clearing * the MAC address in the CAM. */ set = ql_read32(qdev, STS); set &= qdev->port_link_up; status = ql_set_mac_addr(qdev, set); if (status) { netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); return status; } status = ql_route_initialize(qdev); if (status) netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); return status; } static int ql_adapter_initialize(struct ql_adapter *qdev) { u32 value, mask; int i; int status = 0; /* * Set up the System register to halt on errors. 
 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;	/* upper 16 bits are the write-enable mask */
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use port/pci function on which the
	 * packet arrived on in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enable on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues.
	 */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies + max((unsigned long)1,
				    usecs_to_jiffies(30));

	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	/* Poll for the function-reset bit to self-clear. */
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic.
*/ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); return status; } static void ql_display_dev_info(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); netif_info(qdev, probe, qdev->ndev, "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " "XG Roll = %d, XG Rev = %d.\n", qdev->func, qdev->port, qdev->chip_rev_id & 0x0000000f, qdev->chip_rev_id >> 4 & 0x0000000f, qdev->chip_rev_id >> 8 & 0x0000000f, qdev->chip_rev_id >> 12 & 0x0000000f); netif_info(qdev, probe, qdev->ndev, "MAC address %pM\n", ndev->dev_addr); } static int ql_wol(struct ql_adapter *qdev) { int status = 0; u32 wol = MB_WOL_DISABLE; /* The CAM is still intact after a reset, but if we * are doing WOL, then we may need to program the * routing regs. We would also need to issue the mailbox * commands to instruct the MPI what to do per the ethtool * settings. */ if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) { netif_err(qdev, ifdown, qdev->ndev, "Unsupported WOL paramter. qdev->wol = 0x%x.\n", qdev->wol); return -EINVAL; } if (qdev->wol & WAKE_MAGIC) { status = ql_mb_wol_set_magic(qdev, 1); if (status) { netif_err(qdev, ifdown, qdev->ndev, "Failed to set magic packet on %s.\n", qdev->ndev->name); return status; } else netif_info(qdev, drv, qdev->ndev, "Enabled magic packet successfully on %s.\n", qdev->ndev->name); wol |= MB_WOL_MAGIC_PKT; } if (qdev->wol) { wol |= MB_WOL_MODE_ON; status = ql_mb_wol_mode(qdev, wol); netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x) on %s\n", (status == 0) ? "Successfully set" : "Failed", wol, qdev->ndev->name); } return status; } static void ql_cancel_all_work_sync(struct ql_adapter *qdev) { /* Don't kill the reset worker thread if we * are in the process of recovery. 
*/ if (test_bit(QL_ADAPTER_UP, &qdev->flags)) cancel_delayed_work_sync(&qdev->asic_reset_work); cancel_delayed_work_sync(&qdev->mpi_reset_work); cancel_delayed_work_sync(&qdev->mpi_work); cancel_delayed_work_sync(&qdev->mpi_idc_work); cancel_delayed_work_sync(&qdev->mpi_core_to_log); cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); } static int ql_adapter_down(struct ql_adapter *qdev) { int i, status = 0; ql_link_off(qdev); ql_cancel_all_work_sync(qdev); for (i = 0; i < qdev->rss_ring_count; i++) napi_disable(&qdev->rx_ring[i].napi); clear_bit(QL_ADAPTER_UP, &qdev->flags); ql_disable_interrupts(qdev); ql_tx_ring_clean(qdev); /* Call netif_napi_del() from common point. */ for (i = 0; i < qdev->rss_ring_count; i++) netif_napi_del(&qdev->rx_ring[i].napi); status = ql_adapter_reset(qdev); if (status) netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", qdev->func); ql_free_rx_buffers(qdev); return status; } static int ql_adapter_up(struct ql_adapter *qdev) { int err = 0; err = ql_adapter_initialize(qdev); if (err) { netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); goto err_init; } set_bit(QL_ADAPTER_UP, &qdev->flags); ql_alloc_rx_buffers(qdev); /* If the port is initialized and the * link is up the turn on the carrier. */ if ((ql_read32(qdev, STS) & qdev->port_init) && (ql_read32(qdev, STS) & qdev->port_link_up)) ql_link_on(qdev); /* Restore rx mode. */ clear_bit(QL_ALLMULTI, &qdev->flags); clear_bit(QL_PROMISCUOUS, &qdev->flags); qlge_set_multicast_list(qdev->ndev); /* Restore vlan setting. 
*/ qlge_restore_vlan(qdev); ql_enable_interrupts(qdev); ql_enable_all_completion_interrupts(qdev); netif_tx_start_all_queues(qdev->ndev); return 0; err_init: ql_adapter_reset(qdev); return err; } static void ql_release_adapter_resources(struct ql_adapter *qdev) { ql_free_mem_resources(qdev); ql_free_irq(qdev); } static int ql_get_adapter_resources(struct ql_adapter *qdev) { int status = 0; if (ql_alloc_mem_resources(qdev)) { netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); return -ENOMEM; } status = ql_request_irq(qdev); return status; } static int qlge_close(struct net_device *ndev) { struct ql_adapter *qdev = netdev_priv(ndev); /* If we hit pci_channel_io_perm_failure * failure condition, then we already * brought the adapter down. */ if (test_bit(QL_EEH_FATAL, &qdev->flags)) { netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); clear_bit(QL_EEH_FATAL, &qdev->flags); return 0; } /* * Wait for device to recover from a reset. * (Rarely happens, but possible.) */ while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) msleep(1); ql_adapter_down(qdev); ql_release_adapter_resources(qdev); return 0; } static int ql_configure_rings(struct ql_adapter *qdev) { int i; struct rx_ring *rx_ring; struct tx_ring *tx_ring; int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; qdev->lbq_buf_order = get_order(lbq_buf_len); /* In a perfect world we have one RSS ring for each CPU * and each has it's own vector. To do that we ask for * cpu_cnt vectors. ql_enable_msix() will adjust the * vector count to what we actually get. We then * allocate an RSS ring for each. * Essentially, we are doing min(cpu_count, msix_vector_count). */ qdev->intr_count = cpu_cnt; ql_enable_msix(qdev); /* Adjust the RSS ring count to the actual vector count. 
*/ qdev->rss_ring_count = qdev->intr_count; qdev->tx_ring_count = cpu_cnt; qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; for (i = 0; i < qdev->tx_ring_count; i++) { tx_ring = &qdev->tx_ring[i]; memset((void *)tx_ring, 0, sizeof(*tx_ring)); tx_ring->qdev = qdev; tx_ring->wq_id = i; tx_ring->wq_len = qdev->tx_ring_size; tx_ring->wq_size = tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); /* * The completion queue ID for the tx rings start * immediately after the rss rings. */ tx_ring->cq_id = qdev->rss_ring_count + i; } for (i = 0; i < qdev->rx_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; memset((void *)rx_ring, 0, sizeof(*rx_ring)); rx_ring->qdev = qdev; rx_ring->cq_id = i; rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ if (i < qdev->rss_ring_count) { /* * Inbound (RSS) queues. */ rx_ring->cq_len = qdev->rx_ring_size; rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring->lbq_len = NUM_LARGE_BUFFERS; rx_ring->lbq_size = rx_ring->lbq_len * sizeof(__le64); rx_ring->lbq_buf_size = (u16)lbq_buf_len; rx_ring->sbq_len = NUM_SMALL_BUFFERS; rx_ring->sbq_size = rx_ring->sbq_len * sizeof(__le64); rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; rx_ring->type = RX_Q; } else { /* * Outbound queue handles outbound completions only. */ /* outbound cq is same size as tx_ring it services. 
*/ rx_ring->cq_len = qdev->tx_ring_size; rx_ring->cq_size = rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); rx_ring->lbq_len = 0; rx_ring->lbq_size = 0; rx_ring->lbq_buf_size = 0; rx_ring->sbq_len = 0; rx_ring->sbq_size = 0; rx_ring->sbq_buf_size = 0; rx_ring->type = TX_Q; } } return 0; } static int qlge_open(struct net_device *ndev) { int err = 0; struct ql_adapter *qdev = netdev_priv(ndev); err = ql_adapter_reset(qdev); if (err) return err; err = ql_configure_rings(qdev); if (err) return err; err = ql_get_adapter_resources(qdev); if (err) goto error_up; err = ql_adapter_up(qdev); if (err) goto error_up; return err; error_up: ql_release_adapter_resources(qdev); return err; } static int ql_change_rx_buffers(struct ql_adapter *qdev) { struct rx_ring *rx_ring; int i, status; u32 lbq_buf_len; /* Wait for an outstanding reset to complete. */ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { int i = 3; while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { netif_err(qdev, ifup, qdev->ndev, "Waiting for adapter UP...\n"); ssleep(1); } if (!i) { netif_err(qdev, ifup, qdev->ndev, "Timed out waiting for adapter UP\n"); return -ETIMEDOUT; } } status = ql_adapter_down(qdev); if (status) goto error; /* Get the new rx buffer size. */ lbq_buf_len = (qdev->ndev->mtu > 1500) ? LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; qdev->lbq_buf_order = get_order(lbq_buf_len); for (i = 0; i < qdev->rss_ring_count; i++) { rx_ring = &qdev->rx_ring[i]; /* Set the new size. 
 */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

/* ndo_change_mtu: only the two hardware-supported sizes (1500 and
 * 9000) are accepted, and only as a transition between them.
 */
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	} else
		return -EINVAL;

	/* Let the MPI firmware re-negotiate the port config for the
	 * new frame size.
	 */
	queue_delayed_work(qdev->workqueue,
			&qdev->mpi_port_cfg_work, 3*HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev)) {
		return 0;
	}

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

/* ndo_get_stats: aggregate the per-ring counters into ndev->stats. */
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats.
	 */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

/* ndo_set_rx_mode: sync promiscuous/all-multi/multicast filtering
 * state in the routing registers with the netdev flags.
 */
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
*/ if ((ndev->flags & IFF_ALLMULTI) || (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { if (!test_bit(QL_ALLMULTI, &qdev->flags)) { if (ql_set_routing_reg (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { netif_err(qdev, hw, qdev->ndev, "Failed to set all-multi mode.\n"); } else { set_bit(QL_ALLMULTI, &qdev->flags); } } } else { if (test_bit(QL_ALLMULTI, &qdev->flags)) { if (ql_set_routing_reg (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { netif_err(qdev, hw, qdev->ndev, "Failed to clear all-multi mode.\n"); } else { clear_bit(QL_ALLMULTI, &qdev->flags); } } } if (!netdev_mc_empty(ndev)) { status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) goto exit; i = 0; netdev_for_each_mc_addr(ha, ndev) { if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, MAC_ADDR_TYPE_MULTI_MAC, i)) { netif_err(qdev, hw, qdev->ndev, "Failed to loadmulticast address.\n"); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); goto exit; } i++; } ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); if (ql_set_routing_reg (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { netif_err(qdev, hw, qdev->ndev, "Failed to set multicast match mode.\n"); } else { set_bit(QL_ALLMULTI, &qdev->flags); } } exit: ql_sem_unlock(qdev, SEM_RT_IDX_MASK); } static int qlge_set_mac_address(struct net_device *ndev, void *p) { struct ql_adapter *qdev = netdev_priv(ndev); struct sockaddr *addr = p; int status; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); /* Update local copy of current mac address. 
 */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

/* ndo_tx_timeout: escalate to the ASIC-error recovery path. */
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}

/* Delayed-work handler: recover from an ASIC error by cycling the
 * adapter down and back up under the rtnl lock.
 */
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;
	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

/* Per-ASIC operation tables (flash layout and port bring-up differ). */
static const struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

/* Discover which PCI function/port we are and set the per-port
 * semaphore masks, status bits, mailbox addresses and nic_ops.
 */
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;
	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	/* The lower-numbered function of the pair is port 0. */
	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

/* Undo everything ql_init_device() set up (except pci_disable_device,
 * which the callers handle).
 */
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int __devinit ql_init_device(struct pci_dev
*pdev, struct net_device *ndev, int cards_found) { struct ql_adapter *qdev = netdev_priv(ndev); int err = 0; memset((void *)qdev, 0, sizeof(*qdev)); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "PCI device enable failed.\n"); return err; } qdev->ndev = ndev; qdev->pdev = pdev; pci_set_drvdata(pdev, ndev); /* Set PCIe read request size */ err = pcie_set_readrq(pdev, 4096); if (err) { dev_err(&pdev->dev, "Set readrq failed.\n"); goto err_out1; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(&pdev->dev, "PCI region request failed.\n"); return err; } pci_set_master(pdev); if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { set_bit(QL_DMA64, &qdev->flags); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); } if (err) { dev_err(&pdev->dev, "No usable DMA configuration.\n"); goto err_out2; } /* Set PCIe reset type for EEH to fundamental. 
*/ pdev->needs_freset = 1; pci_save_state(pdev); qdev->reg_base = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); if (!qdev->reg_base) { dev_err(&pdev->dev, "Register mapping failed.\n"); err = -ENOMEM; goto err_out2; } qdev->doorbell_area_size = pci_resource_len(pdev, 3); qdev->doorbell_area = ioremap_nocache(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3)); if (!qdev->doorbell_area) { dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); err = -ENOMEM; goto err_out2; } err = ql_get_board_info(qdev); if (err) { dev_err(&pdev->dev, "Register access failed.\n"); err = -EIO; goto err_out2; } qdev->msg_enable = netif_msg_init(debug, default_msg); spin_lock_init(&qdev->hw_lock); spin_lock_init(&qdev->stats_lock); if (qlge_mpi_coredump) { qdev->mpi_coredump = vmalloc(sizeof(struct ql_mpi_coredump)); if (qdev->mpi_coredump == NULL) { dev_err(&pdev->dev, "Coredump alloc failed.\n"); err = -ENOMEM; goto err_out2; } if (qlge_force_coredump) set_bit(QL_FRC_COREDUMP, &qdev->flags); } /* make sure the EEPROM is good */ err = qdev->nic_ops->get_flash(qdev); if (err) { dev_err(&pdev->dev, "Invalid FLASH.\n"); goto err_out2; } memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); /* Keep local copy of current mac address. */ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); /* Set up the default ring sizes. */ qdev->tx_ring_size = NUM_TX_RING_ENTRIES; qdev->rx_ring_size = NUM_RX_RING_ENTRIES; /* Set up the coalescing parameters. */ qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; /* * Set up the operating parameters. 
*/ qdev->workqueue = create_singlethread_workqueue(ndev->name); INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); init_completion(&qdev->ide_completion); mutex_init(&qdev->mpi_mutex); if (!cards_found) { dev_info(&pdev->dev, "%s\n", DRV_STRING); dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", DRV_NAME, DRV_VERSION); } return 0; err_out2: ql_release_all(pdev); err_out1: pci_disable_device(pdev); return err; } static const struct net_device_ops qlge_netdev_ops = { .ndo_open = qlge_open, .ndo_stop = qlge_close, .ndo_start_xmit = qlge_send, .ndo_change_mtu = qlge_change_mtu, .ndo_get_stats = qlge_get_stats, .ndo_set_rx_mode = qlge_set_multicast_list, .ndo_set_mac_address = qlge_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = qlge_tx_timeout, .ndo_fix_features = qlge_fix_features, .ndo_set_features = qlge_set_features, .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, }; static void ql_timer(unsigned long data) { struct ql_adapter *qdev = (struct ql_adapter *)data; u32 var = 0; var = ql_read32(qdev, STS); if (pci_channel_offline(qdev->pdev)) { netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); return; } mod_timer(&qdev->timer, jiffies + (5*HZ)); } static int __devinit qlge_probe(struct pci_dev *pdev, const struct pci_device_id *pci_entry) { struct net_device *ndev = NULL; struct ql_adapter *qdev = NULL; static int cards_found = 0; int err = 0; ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), min(MAX_CPUS, (int)num_online_cpus())); if (!ndev) return -ENOMEM; err = ql_init_device(pdev, ndev, cards_found); if (err < 0) { free_netdev(ndev); return err; } qdev = 
netdev_priv(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; if (test_bit(QL_DMA64, &qdev->flags)) ndev->features |= NETIF_F_HIGHDMA; /* * Set up net_device structure. */ ndev->tx_queue_len = qdev->tx_ring_size; ndev->irq = pdev->irq; ndev->netdev_ops = &qlge_netdev_ops; SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); ndev->watchdog_timeo = 10 * HZ; err = register_netdev(ndev); if (err) { dev_err(&pdev->dev, "net device registration failed.\n"); ql_release_all(pdev); pci_disable_device(pdev); return err; } /* Start up the timer to trigger EEH if * the bus goes dead */ init_timer_deferrable(&qdev->timer); qdev->timer.data = (unsigned long)qdev; qdev->timer.function = ql_timer; qdev->timer.expires = jiffies + (5*HZ); add_timer(&qdev->timer); ql_link_off(qdev); ql_display_dev_info(ndev); atomic_set(&qdev->lb_count, 0); cards_found++; return 0; } netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) { return qlge_send(skb, ndev); } int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) { return ql_clean_inbound_rx_ring(rx_ring, budget); } static void __devexit qlge_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); del_timer_sync(&qdev->timer); ql_cancel_all_work_sync(qdev); unregister_netdev(ndev); ql_release_all(pdev); pci_disable_device(pdev); free_netdev(ndev); } /* Clean up resources without touching hardware. 
*/ static void ql_eeh_close(struct net_device *ndev) { int i; struct ql_adapter *qdev = netdev_priv(ndev); if (netif_carrier_ok(ndev)) { netif_carrier_off(ndev); netif_stop_queue(ndev); } /* Disabling the timer */ del_timer_sync(&qdev->timer); ql_cancel_all_work_sync(qdev); for (i = 0; i < qdev->rss_ring_count; i++) netif_napi_del(&qdev->rx_ring[i].napi); clear_bit(QL_ADAPTER_UP, &qdev->flags); ql_tx_ring_clean(qdev); ql_free_rx_buffers(qdev); ql_release_adapter_resources(qdev); } /* * This callback is called by the PCI subsystem whenever * a PCI bus error is detected. */ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); switch (state) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: netif_device_detach(ndev); if (netif_running(ndev)) ql_eeh_close(ndev); pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: dev_err(&pdev->dev, "%s: pci_channel_io_perm_failure.\n", __func__); ql_eeh_close(ndev); set_bit(QL_EEH_FATAL, &qdev->flags); return PCI_ERS_RESULT_DISCONNECT; } /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /* * This callback is called after the PCI buss has been reset. * Basically, this tries to restart the card from scratch. * This is a shortened version of the device probe/discovery code, * it resembles the first-half of the () routine. 
*/ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); pdev->error_state = pci_channel_io_normal; pci_restore_state(pdev); if (pci_enable_device(pdev)) { netif_err(qdev, ifup, qdev->ndev, "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); if (ql_adapter_reset(qdev)) { netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); set_bit(QL_EEH_FATAL, &qdev->flags); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_RECOVERED; } static void qlge_io_resume(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); int err = 0; if (netif_running(ndev)) { err = qlge_open(ndev); if (err) { netif_err(qdev, ifup, qdev->ndev, "Device initialization failed after reset.\n"); return; } } else { netif_err(qdev, ifup, qdev->ndev, "Device was not running prior to EEH.\n"); } mod_timer(&qdev->timer, jiffies + (5*HZ)); netif_device_attach(ndev); } static struct pci_error_handlers qlge_err_handler = { .error_detected = qlge_io_error_detected, .slot_reset = qlge_io_slot_reset, .resume = qlge_io_resume, }; static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); int err; netif_device_detach(ndev); del_timer_sync(&qdev->timer); if (netif_running(ndev)) { err = ql_adapter_down(qdev); if (!err) return err; } ql_wol(qdev); err = pci_save_state(pdev); if (err) return err; pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } #ifdef CONFIG_PM static int qlge_resume(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); int err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) { netif_err(qdev, ifup, qdev->ndev, 
"Cannot enable PCI device from suspend\n"); return err; } pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); if (netif_running(ndev)) { err = ql_adapter_up(qdev); if (err) return err; } mod_timer(&qdev->timer, jiffies + (5*HZ)); netif_device_attach(ndev); return 0; } #endif /* CONFIG_PM */ static void qlge_shutdown(struct pci_dev *pdev) { qlge_suspend(pdev, PMSG_SUSPEND); } static struct pci_driver qlge_driver = { .name = DRV_NAME, .id_table = qlge_pci_tbl, .probe = qlge_probe, .remove = __devexit_p(qlge_remove), #ifdef CONFIG_PM .suspend = qlge_suspend, .resume = qlge_resume, #endif .shutdown = qlge_shutdown, .err_handler = &qlge_err_handler }; static int __init qlge_init_module(void) { return pci_register_driver(&qlge_driver); } static void __exit qlge_exit(void) { pci_unregister_driver(&qlge_driver); } module_init(qlge_init_module); module_exit(qlge_exit);
gpl-2.0
lyapota/m8_sense_3-4-0
drivers/watchdog/via_wdt.c
4807
7815
/* * VIA Chipset Watchdog Driver * * Copyright (C) 2011 Sigfox * License terms: GNU General Public License (GPL) version 2 * Author: Marc Vertes <marc.vertes@sigfox.com> * Based on a preliminary version from Harald Welte <HaraldWelte@viatech.com> * Timer code by Wim Van Sebroeck <wim@iguana.be> * * Caveat: PnP must be enabled in BIOS to allow full access to watchdog * control registers. If not, the watchdog must be configured in BIOS manually. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/timer.h> #include <linux/watchdog.h> /* Configuration registers relative to the pci device */ #define VIA_WDT_MMIO_BASE 0xe8 /* MMIO region base address */ #define VIA_WDT_CONF 0xec /* watchdog enable state */ /* Relevant bits for the VIA_WDT_CONF register */ #define VIA_WDT_CONF_ENABLE 0x01 /* 1: enable watchdog */ #define VIA_WDT_CONF_MMIO 0x02 /* 1: enable watchdog MMIO */ /* * The MMIO region contains the watchog control register and the * hardware timer counter. */ #define VIA_WDT_MMIO_LEN 8 /* MMIO region length in bytes */ #define VIA_WDT_CTL 0 /* MMIO addr+0: state/control reg. */ #define VIA_WDT_COUNT 4 /* MMIO addr+4: timer counter reg. */ /* Bits for the VIA_WDT_CTL register */ #define VIA_WDT_RUNNING 0x01 /* 0: stop, 1: running */ #define VIA_WDT_FIRED 0x02 /* 1: restarted by expired watchdog */ #define VIA_WDT_PWROFF 0x04 /* 0: reset, 1: poweroff */ #define VIA_WDT_DISABLED 0x08 /* 1: timer is disabled */ #define VIA_WDT_TRIGGER 0x80 /* 1: start a new countdown */ /* Hardware heartbeat in seconds */ #define WDT_HW_HEARTBEAT 1 /* Timer heartbeat (500ms) */ #define WDT_HEARTBEAT (HZ/2) /* should be <= ((WDT_HW_HEARTBEAT*HZ)/2) */ /* User space timeout in seconds */ #define WDT_TIMEOUT_MAX 1023 /* approx. 17 min. 
*/ #define WDT_TIMEOUT 60 static int timeout = WDT_TIMEOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds, between 1 and 1023 " "(default = " __MODULE_STRING(WDT_TIMEOUT) ")"); static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " "(default = " __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static struct watchdog_device wdt_dev; static struct resource wdt_res; static void __iomem *wdt_mem; static unsigned int mmio; static void wdt_timer_tick(unsigned long data); static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0); /* The timer that pings the watchdog */ static unsigned long next_heartbeat; /* the next_heartbeat for the timer */ static inline void wdt_reset(void) { unsigned int ctl = readl(wdt_mem); writel(ctl | VIA_WDT_TRIGGER, wdt_mem); } /* * Timer tick: the timer will make sure that the watchdog timer hardware * is being reset in time. The conditions to do this are: * 1) the watchog timer has been started and /dev/watchdog is open * and there is still time left before userspace should send the * next heartbeat/ping. (note: the internal heartbeat is much smaller * then the external/userspace heartbeat). * 2) the watchdog timer has been stopped by userspace. 
*/ static void wdt_timer_tick(unsigned long data) { if (time_before(jiffies, next_heartbeat) || (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) { wdt_reset(); mod_timer(&timer, jiffies + WDT_HEARTBEAT); } else pr_crit("I will reboot your machine !\n"); } static int wdt_ping(struct watchdog_device *wdd) { /* calculate when the next userspace timeout will be */ next_heartbeat = jiffies + wdd->timeout * HZ; return 0; } static int wdt_start(struct watchdog_device *wdd) { unsigned int ctl = readl(wdt_mem); writel(wdd->timeout, wdt_mem + VIA_WDT_COUNT); writel(ctl | VIA_WDT_RUNNING | VIA_WDT_TRIGGER, wdt_mem); wdt_ping(wdd); mod_timer(&timer, jiffies + WDT_HEARTBEAT); return 0; } static int wdt_stop(struct watchdog_device *wdd) { unsigned int ctl = readl(wdt_mem); writel(ctl & ~VIA_WDT_RUNNING, wdt_mem); return 0; } static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int new_timeout) { writel(new_timeout, wdt_mem + VIA_WDT_COUNT); wdd->timeout = new_timeout; return 0; } static const struct watchdog_info wdt_info = { .identity = "VIA watchdog", .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, }; static const struct watchdog_ops wdt_ops = { .owner = THIS_MODULE, .start = wdt_start, .stop = wdt_stop, .ping = wdt_ping, .set_timeout = wdt_set_timeout, }; static struct watchdog_device wdt_dev = { .info = &wdt_info, .ops = &wdt_ops, .min_timeout = 1, .max_timeout = WDT_TIMEOUT_MAX, }; static int __devinit wdt_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned char conf; int ret = -ENODEV; if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "cannot enable PCI device\n"); return -ENODEV; } /* * Allocate a MMIO region which contains watchdog control register * and counter, then configure the watchdog to use this region. * This is possible only if PnP is properly enabled in BIOS. * If not, the watchdog must be configured in BIOS manually. 
*/ if (allocate_resource(&iomem_resource, &wdt_res, VIA_WDT_MMIO_LEN, 0xf0000000, 0xffffff00, 0xff, NULL, NULL)) { dev_err(&pdev->dev, "MMIO allocation failed\n"); goto err_out_disable_device; } pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start); pci_read_config_byte(pdev, VIA_WDT_CONF, &conf); conf |= VIA_WDT_CONF_ENABLE | VIA_WDT_CONF_MMIO; pci_write_config_byte(pdev, VIA_WDT_CONF, conf); pci_read_config_dword(pdev, VIA_WDT_MMIO_BASE, &mmio); if (mmio) { dev_info(&pdev->dev, "VIA Chipset watchdog MMIO: %x\n", mmio); } else { dev_err(&pdev->dev, "MMIO setting failed. Check BIOS.\n"); goto err_out_resource; } if (!request_mem_region(mmio, VIA_WDT_MMIO_LEN, "via_wdt")) { dev_err(&pdev->dev, "MMIO region busy\n"); goto err_out_resource; } wdt_mem = ioremap(mmio, VIA_WDT_MMIO_LEN); if (wdt_mem == NULL) { dev_err(&pdev->dev, "cannot remap VIA wdt MMIO registers\n"); goto err_out_release; } wdt_dev.timeout = timeout; watchdog_set_nowayout(&wdt_dev, nowayout); if (readl(wdt_mem) & VIA_WDT_FIRED) wdt_dev.bootstatus |= WDIOF_CARDRESET; ret = watchdog_register_device(&wdt_dev); if (ret) goto err_out_iounmap; /* start triggering, in case of watchdog already enabled by BIOS */ mod_timer(&timer, jiffies + WDT_HEARTBEAT); return 0; err_out_iounmap: iounmap(wdt_mem); err_out_release: release_mem_region(mmio, VIA_WDT_MMIO_LEN); err_out_resource: release_resource(&wdt_res); err_out_disable_device: pci_disable_device(pdev); return ret; } static void __devexit wdt_remove(struct pci_dev *pdev) { watchdog_unregister_device(&wdt_dev); del_timer(&timer); iounmap(wdt_mem); release_mem_region(mmio, VIA_WDT_MMIO_LEN); release_resource(&wdt_res); pci_disable_device(pdev); } static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) }, { 0 } }; static struct pci_driver wdt_driver = { .name = "via_wdt", .id_table = 
wdt_pci_table, .probe = wdt_probe, .remove = __devexit_p(wdt_remove), }; static int __init wdt_init(void) { if (timeout < 1 || timeout > WDT_TIMEOUT_MAX) timeout = WDT_TIMEOUT; return pci_register_driver(&wdt_driver); } static void __exit wdt_exit(void) { pci_unregister_driver(&wdt_driver); } module_init(wdt_init); module_exit(wdt_exit); MODULE_AUTHOR("Marc Vertes"); MODULE_DESCRIPTION("Driver for watchdog timer on VIA chipset"); MODULE_LICENSE("GPL");
gpl-2.0
Samsung-BCM/android_kernel_samsung_bcm
sound/isa/azt2320.c
5063
9945
/* card-azt2320.c - driver for Aztech Systems AZT2320 based soundcards. Copyright (C) 1999-2000 by Massimo Piccioni <dafastidio@libero.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This driver should provide support for most Aztech AZT2320 based cards. Several AZT2316 chips are also supported/tested, but autoprobe doesn't work: all module option have to be set. No docs available for us at Aztech headquarters !!! Unbelievable ... No other help obtained. Thanks to Rainer Wiesner <rainer.wiesner@01019freenet.de> for the WSS activation method (full-duplex audio!). 
*/ #include <asm/io.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/pnp.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/wss.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define PFX "azt2320: " MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); MODULE_DESCRIPTION("Aztech Systems AZT2320"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Aztech Systems,PRO16V}," "{Aztech Systems,AZT2320}," "{Aztech Systems,AZT3300}," "{Aztech Systems,AZT2320}," "{Aztech Systems,AZT3000}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long wss_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for azt2320 based soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for azt2320 based soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable azt2320 based soundcard."); struct snd_card_azt2320 { int dev_no; struct pnp_dev *dev; struct pnp_dev *devmpu; struct snd_wss *chip; }; static struct pnp_card_device_id snd_azt2320_pnpids[] = { /* PRO16V */ { .id = "AZT1008", .devs = { { "AZT1008" }, { "AZT2001" }, } }, /* Aztech Sound Galaxy 16 */ { .id = 
"AZT2320", .devs = { { "AZT0001" }, { "AZT0002" }, } }, /* Packard Bell Sound III 336 AM/SP */ { .id = "AZT3000", .devs = { { "AZT1003" }, { "AZT2001" }, } }, /* AT3300 */ { .id = "AZT3002", .devs = { { "AZT1004" }, { "AZT2001" }, } }, /* --- */ { .id = "AZT3005", .devs = { { "AZT1003" }, { "AZT2001" }, } }, /* --- */ { .id = "AZT3011", .devs = { { "AZT1003" }, { "AZT2001" }, } }, { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_azt2320_pnpids); #define DRIVER_NAME "snd-card-azt2320" static int __devinit snd_card_azt2320_pnp(int dev, struct snd_card_azt2320 *acard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL); if (acard->dev == NULL) return -ENODEV; acard->devmpu = pnp_request_card_device(card, id->devs[1].id, NULL); pdev = acard->dev; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR PFX "AUDIO pnp configure failure\n"); return err; } port[dev] = pnp_port_start(pdev, 0); fm_port[dev] = pnp_port_start(pdev, 1); wss_port[dev] = pnp_port_start(pdev, 2); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); pdev = acard->devmpu; if (pdev != NULL) { err = pnp_activate_dev(pdev); if (err < 0) goto __mpu_error; mpu_port[dev] = pnp_port_start(pdev, 0); mpu_irq[dev] = pnp_irq(pdev, 0); } else { __mpu_error: if (pdev) { pnp_release_card_device(pdev); snd_printk(KERN_ERR PFX "MPU401 pnp configure failure, skipping\n"); } acard->devmpu = NULL; mpu_port[dev] = -1; } return 0; } /* same of snd_sbdsp_command by Jaroslav Kysela */ static int __devinit snd_card_azt2320_command(unsigned long port, unsigned char val) { int i; unsigned long limit; limit = jiffies + HZ / 10; for (i = 50000; i && time_after(limit, jiffies); i--) if (!(inb(port + 0x0c) & 0x80)) { outb(val, port + 0x0c); return 0; } return -EBUSY; } static int __devinit snd_card_azt2320_enable_wss(unsigned long port) { int error; if ((error = 
snd_card_azt2320_command(port, 0x09))) return error; if ((error = snd_card_azt2320_command(port, 0x00))) return error; mdelay(5); return 0; } static int __devinit snd_card_azt2320_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { int error; struct snd_card *card; struct snd_card_azt2320 *acard; struct snd_wss *chip; struct snd_opl3 *opl3; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_card_azt2320), &card); if (error < 0) return error; acard = card->private_data; if ((error = snd_card_azt2320_pnp(dev, acard, pcard, pid))) { snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); if ((error = snd_card_azt2320_enable_wss(port[dev]))) { snd_card_free(card); return error; } error = snd_wss_create(card, wss_port[dev], -1, irq[dev], dma1[dev], dma2[dev], WSS_HW_DETECT, 0, &chip); if (error < 0) { snd_card_free(card); return error; } strcpy(card->driver, "AZT2320"); strcpy(card->shortname, "Aztech AZT2320"); sprintf(card->longname, "%s, WSS at 0x%lx, irq %i, dma %i&%i", card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]); error = snd_wss_pcm(chip, 0, NULL); if (error < 0) { snd_card_free(card); return error; } error = snd_wss_mixer(chip); if (error < 0) { snd_card_free(card); return error; } error = snd_wss_timer(chip, 0, NULL); if (error < 0) { snd_card_free(card); return error; } if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) { if (snd_mpu401_uart_new(card, 0, MPU401_HW_AZT2320, mpu_port[dev], 0, mpu_irq[dev], NULL) < 0) snd_printk(KERN_ERR PFX "no MPU-401 device at 0x%lx\n", mpu_port[dev]); } if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) { if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_AUTO, 0, &opl3) < 0) { snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx\n", fm_port[dev], fm_port[dev] + 2); } else { if ((error = snd_opl3_timer_new(opl3, 1, 2)) < 0) { snd_card_free(card); return error; } if ((error = snd_opl3_hwdep_new(opl3, 0, 
1, NULL)) < 0) { snd_card_free(card); return error; } } } if ((error = snd_card_register(card)) < 0) { snd_card_free(card); return error; } pnp_set_card_drvdata(pcard, card); return 0; } static unsigned int __devinitdata azt2320_devices; static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { static int dev; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (!enable[dev]) continue; res = snd_card_azt2320_probe(dev, card, id); if (res < 0) return res; dev++; azt2320_devices++; return 0; } return -ENODEV; } static void __devexit snd_azt2320_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } #ifdef CONFIG_PM static int snd_azt2320_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_card_azt2320 *acard = card->private_data; struct snd_wss *chip = acard->chip; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); chip->suspend(chip); return 0; } static int snd_azt2320_pnp_resume(struct pnp_card_link *pcard) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_card_azt2320 *acard = card->private_data; struct snd_wss *chip = acard->chip; chip->resume(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pnp_card_driver azt2320_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "azt2320", .id_table = snd_azt2320_pnpids, .probe = snd_azt2320_pnp_detect, .remove = __devexit_p(snd_azt2320_pnp_remove), #ifdef CONFIG_PM .suspend = snd_azt2320_pnp_suspend, .resume = snd_azt2320_pnp_resume, #endif }; static int __init alsa_card_azt2320_init(void) { int err; err = pnp_register_card_driver(&azt2320_pnpc_driver); if (err) return err; if (!azt2320_devices) { pnp_unregister_card_driver(&azt2320_pnpc_driver); #ifdef MODULE snd_printk(KERN_ERR "no AZT2320 based soundcards found\n"); #endif return -ENODEV; } return 0; } static void 
__exit alsa_card_azt2320_exit(void) { pnp_unregister_card_driver(&azt2320_pnpc_driver); } module_init(alsa_card_azt2320_init) module_exit(alsa_card_azt2320_exit)
gpl-2.0
finnq/android_kernel_lge_g3
arch/sh/kernel/time.c
7367
2636
/* * arch/sh/kernel/time.c * * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/profile.h> #include <linux/timex.h> #include <linux/sched.h> #include <linux/clockchips.h> #include <linux/platform_device.h> #include <linux/smp.h> #include <linux/rtc.h> #include <asm/clock.h> #include <asm/rtc.h> /* Dummy RTC ops */ static void null_rtc_get_time(struct timespec *tv) { tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0); tv->tv_nsec = 0; } static int null_rtc_set_time(const time_t secs) { return 0; } void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; void read_persistent_clock(struct timespec *ts) { rtc_sh_get_time(ts); } #ifdef CONFIG_GENERIC_CMOS_UPDATE int update_persistent_clock(struct timespec now) { return rtc_sh_set_time(now.tv_sec); } #endif unsigned int get_rtc_time(struct rtc_time *tm) { if (rtc_sh_get_time != null_rtc_get_time) { struct timespec tv; rtc_sh_get_time(&tv); rtc_time_to_tm(tv.tv_sec, tm); } return RTC_24H; } EXPORT_SYMBOL(get_rtc_time); int set_rtc_time(struct rtc_time *tm) { unsigned long secs; rtc_tm_to_time(tm, &secs); return rtc_sh_set_time(secs); } EXPORT_SYMBOL(set_rtc_time); static int __init rtc_generic_init(void) { struct platform_device *pdev; if (rtc_sh_get_time == null_rtc_get_time) return -ENODEV; pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } module_init(rtc_generic_init); void (*board_time_init)(void); static void __init sh_late_time_init(void) { /* * Make sure all compiled-in early timers 
register themselves. * * Run probe() for two "earlytimer" devices, these will be the * clockevents and clocksource devices respectively. In the event * that only a clockevents device is available, we -ENODEV on the * clocksource and the jiffies clocksource is used transparently * instead. No error handling is necessary here. */ early_platform_driver_register_all("earlytimer"); early_platform_driver_probe("earlytimer", 2, 0); } void __init time_init(void) { if (board_time_init) board_time_init(); clk_init(); late_time_init = sh_late_time_init; }
gpl-2.0
jpilet/linux-anemobox
drivers/lguest/segments.c
9671
7965
/*P:600 * The x86 architecture has segments, which involve a table of descriptors * which can be used to do funky things with virtual address interpretation. * We originally used to use segments so the Guest couldn't alter the * Guest<->Host Switcher, and then we had to trim Guest segments, and restore * for userspace per-thread segments, but trim again for on userspace->kernel * transitions... This nightmarish creation was contained within this file, * where we knew not to tread without heavy armament and a change of underwear. * * In these modern times, the segment handling code consists of simple sanity * checks, and the worst you'll experience reading this code is butterfly-rash * from frolicking through its parklike serenity. :*/ #include "lg.h" /*H:600 * Segments & The Global Descriptor Table * * (That title sounds like a bad Nerdcore group. Not to suggest that there are * any good Nerdcore groups, but in high school a friend of mine had a band * called Joe Fish and the Chips, so there are definitely worse band names). * * To refresh: the GDT is a table of 8-byte values describing segments. Once * set up, these segments can be loaded into one of the 6 "segment registers". * * GDT entries are passed around as "struct desc_struct"s, which like IDT * entries are split into two 32-bit members, "a" and "b". One day, someone * will clean that up, and be declared a Hero. (No pressure, I'm just saying). * * Anyway, the GDT entry contains a base (the start address of the segment), a * limit (the size of the segment - 1), and some flags. Sounds simple, and it * would be, except those zany Intel engineers decided that it was too boring * to put the base at one end, the limit at the other, and the flags in * between. They decided to shotgun the bits at random throughout the 8 bytes, * like so: * * 0 16 40 48 52 56 63 * [ limit part 1 ][ base part 1 ][ flags ][li][fl][base ] * mit ags part 2 * part 2 * * As a result, this file contains a certain amount of magic numeracy. 
Let's * begin. */ /* * There are several entries we don't let the Guest set. The TSS entry is the * "Task State Segment" which controls all kinds of delicate things. The * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the * the Guest can't be trusted to deal with double faults. */ static bool ignored_gdt(unsigned int num) { return (num == GDT_ENTRY_TSS || num == GDT_ENTRY_LGUEST_CS || num == GDT_ENTRY_LGUEST_DS || num == GDT_ENTRY_DOUBLEFAULT_TSS); } /*H:630 * Once the Guest gave us new GDT entries, we fix them up a little. We * don't care if they're invalid: the worst that can happen is a General * Protection Fault in the Switcher when it restores a Guest segment register * which tries to use that entry. Then we kill the Guest for causing such a * mess: the message will be "unhandled trap 256". */ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end) { unsigned int i; for (i = start; i < end; i++) { /* * We never copy these ones to real GDT, so we don't care what * they say */ if (ignored_gdt(i)) continue; /* * Segment descriptors contain a privilege level: the Guest is * sometimes careless and leaves this as 0, even though it's * running at privilege level 1. If so, we fix it here. */ if (cpu->arch.gdt[i].dpl == 0) cpu->arch.gdt[i].dpl |= GUEST_PL; /* * Each descriptor has an "accessed" bit. If we don't set it * now, the CPU will try to set it when the Guest first loads * that entry into a segment register. But the GDT isn't * writable by the Guest, so bad things can happen. */ cpu->arch.gdt[i].type |= 0x1; } } /*H:610 * Like the IDT, we never simply use the GDT the Guest gives us. We keep * a GDT for each CPU, and copy across the Guest's entries each time we want to * run the Guest on that CPU. * * This routine is called at boot or modprobe time for each CPU to set up the * constant GDT entries: the ones which are the same no matter what Guest we're * running. 
*/ void setup_default_gdt_entries(struct lguest_ro_state *state) { struct desc_struct *gdt = state->guest_gdt; unsigned long tss = (unsigned long)&state->guest_tss; /* The Switcher segments are full 0-4G segments, privilege level 0 */ gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; /* * The TSS segment refers to the TSS entry for this particular CPU. */ gdt[GDT_ENTRY_TSS].a = 0; gdt[GDT_ENTRY_TSS].b = 0; gdt[GDT_ENTRY_TSS].limit0 = 0x67; gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF; gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF; gdt[GDT_ENTRY_TSS].base2 = tss >> 24; gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */ gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */ gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */ gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */ } /* * This routine sets up the initial Guest GDT for booting. All entries start * as 0 (unusable). */ void setup_guest_gdt(struct lg_cpu *cpu) { /* * Start with full 0-4G segments...except the Guest is allowed to use * them, so set the privilege level appropriately in the flags. */ cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL; cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL; } /*H:650 * An optimization of copy_gdt(), for just the three "thead-local storage" * entries. */ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt) { unsigned int i; for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++) gdt[i] = cpu->arch.gdt[i]; } /*H:640 * When the Guest is run on a different CPU, or the GDT entries have changed, * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's * GDT. */ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt) { unsigned int i; /* * The default entries from setup_default_gdt_entries() are not * replaced. See ignored_gdt() above. 
*/ for (i = 0; i < GDT_ENTRIES; i++) if (!ignored_gdt(i)) gdt[i] = cpu->arch.gdt[i]; } /*H:620 * This is where the Guest asks us to load a new GDT entry * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. */ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) { /* * We assume the Guest has the same number of GDT entries as the * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ if (num >= ARRAY_SIZE(cpu->arch.gdt)) { kill_guest(cpu, "too many gdt entries %i", num); return; } /* Set it up, then fix it. */ cpu->arch.gdt[num].a = lo; cpu->arch.gdt[num].b = hi; fixup_gdt_table(cpu, num, num+1); /* * Mark that the GDT changed so the core knows it has to copy it again, * even if the Guest is run on the same CPU. */ cpu->changed |= CHANGED_GDT; } /* * This is the fast-track version for just changing the three TLS entries. * Remember that this happens on every context switch, so it's worth * optimizing. But wouldn't it be neater to have a single hypercall to cover * both cases? */ void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls) { struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN]; __lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES); fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1); /* Note that just the TLS entries have changed. */ cpu->changed |= CHANGED_GDT_TLS; } /*H:660 * With this, we have finished the Host. * * Five of the seven parts of our task are complete. You have made it through * the Bit of Despair (I think that's somewhere in the page table code, * myself). * * Next, we examine "make Switcher". It's short, but intense. */
gpl-2.0
Validus-Kernel/kernel_oneplus2
arch/avr32/boards/atngw100/mrmt.c
9671
10132
/* * Board-specific setup code for Remote Media Terminal 1 (RMT1) * add-on board for the ATNGW100 Network Gateway * * Copyright (C) 2008 Mediama Technologies * Based on ATNGW100 Network Gateway (Copyright (C) Atmel) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/linkage.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/fb.h> #include <linux/leds.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/atmel_serial.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <video/atmel_lcdc.h> #include <sound/atmel-ac97c.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/setup.h> #include <mach/at32ap700x.h> #include <mach/board.h> #include <mach/init.h> #include <mach/portmux.h> /* Define board-specifoic GPIO assignments */ #define PIN_LCD_BL GPIO_PIN_PA(28) #define PWM_CH_BL 0 /* Must match with GPIO pin definition */ #define PIN_LCD_DISP GPIO_PIN_PA(31) #define PIN_AC97_RST_N GPIO_PIN_PA(30) #define PB_EXTINT_BASE 25 #define TS_IRQ 0 #define PIN_TS_EXTINT GPIO_PIN_PB(PB_EXTINT_BASE+TS_IRQ) #define PIN_PB_LEFT GPIO_PIN_PB(11) #define PIN_PB_RIGHT GPIO_PIN_PB(12) #define PIN_PWR_SW_N GPIO_PIN_PB(14) #define PIN_PWR_ON GPIO_PIN_PB(13) #define PIN_ZB_RST_N GPIO_PIN_PA(21) #define PIN_BT_RST GPIO_PIN_PA(22) #define PIN_LED_SYS GPIO_PIN_PA(16) #define PIN_LED_A GPIO_PIN_PA(19) #define PIN_LED_B GPIO_PIN_PE(19) #ifdef CONFIG_BOARD_MRMT_LCD_LQ043T3DX0X /* Sharp LQ043T3DX0x (or compatible) panel */ static struct fb_videomode __initdata lcd_fb_modes[] = { { .name = "480x272 @ 59.94Hz", .refresh = 59.94, .xres = 480, .yres = 272, .pixclock = KHZ2PICOS(9000), .left_margin = 2, .right_margin = 2, .upper_margin = 3, .lower_margin = 9, .hsync_len = 41, .vsync_len = 1, .sync = 0, .vmode = 
FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .manufacturer = "SHA", .monitor = "LQ043T3DX02", .modedb = lcd_fb_modes, .modedb_len = ARRAY_SIZE(lcd_fb_modes), .hfmin = 14915, .hfmax = 17638, .vfmin = 53, .vfmax = 61, .dclkmax = 9260000, }; static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ATMEL_LCDC_INVCLK_NORMAL | ATMEL_LCDC_MEMOR_BIG), .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, .default_monspecs = &lcd_fb_default_monspecs, .guard_time = 2, }; #endif #ifdef CONFIG_BOARD_MRMT_LCD_KWH043GM08 /* Sharp KWH043GM08-Fxx (or compatible) panel */ static struct fb_videomode __initdata lcd_fb_modes[] = { { .name = "480x272 @ 59.94Hz", .refresh = 59.94, .xres = 480, .yres = 272, .pixclock = KHZ2PICOS(9000), .left_margin = 2, .right_margin = 2, .upper_margin = 3, .lower_margin = 9, .hsync_len = 41, .vsync_len = 1, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .manufacturer = "FOR", .monitor = "KWH043GM08", .modedb = lcd_fb_modes, .modedb_len = ARRAY_SIZE(lcd_fb_modes), .hfmin = 14915, .hfmax = 17638, .vfmin = 53, .vfmax = 61, .dclkmax = 9260000, }; static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ATMEL_LCDC_INVCLK_INVERTED | ATMEL_LCDC_MEMOR_BIG), .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, .default_monspecs = &lcd_fb_default_monspecs, .guard_time = 2, }; #endif #ifdef CONFIG_BOARD_MRMT_AC97 static struct ac97c_platform_data __initdata ac97c0_data = { .reset_pin = PIN_AC97_RST_N, }; #endif #ifdef CONFIG_BOARD_MRMT_UCB1400_TS /* NOTE: IRQ assignment relies on kernel module parameter */ static struct platform_device rmt_ts_device = { .name = 
"ucb1400_ts", .id = -1, } }; #endif #ifdef CONFIG_BOARD_MRMT_BL_PWM /* PWM LEDs: LCD Backlight, etc */ static struct gpio_led rmt_pwm_led[] = { /* here the "gpio" is actually a PWM channel */ { .name = "backlight", .gpio = PWM_CH_BL, }, }; static struct gpio_led_platform_data rmt_pwm_led_data = { .num_leds = ARRAY_SIZE(rmt_pwm_led), .leds = rmt_pwm_led, }; static struct platform_device rmt_pwm_led_dev = { .name = "leds-atmel-pwm", .id = -1, .dev = { .platform_data = &rmt_pwm_led_data, }, }; #endif #ifdef CONFIG_BOARD_MRMT_ADS7846_TS static int ads7846_pendown_state(void) { return !gpio_get_value( PIN_TS_EXTINT ); /* PENIRQ.*/ } static struct ads7846_platform_data ads_info = { .model = 7846, .keep_vref_on = 0, /* Use external VREF pin */ .vref_delay_usecs = 0, .vref_mv = 3300, /* VREF = 3.3V */ .settle_delay_usecs = 800, .penirq_recheck_delay_usecs = 800, .x_plate_ohms = 750, .y_plate_ohms = 300, .pressure_max = 4096, .debounce_max = 1, .debounce_rep = 0, .debounce_tol = (~0), .get_pendown_state = ads7846_pendown_state, .filter = NULL, .filter_init = NULL, }; static struct spi_board_info spi01_board_info[] __initdata = { { .modalias = "ads7846", .max_speed_hz = 31250*26, .bus_num = 0, .chip_select = 1, .platform_data = &ads_info, .irq = AT32_EXTINT(TS_IRQ), }, }; #endif /* GPIO Keys: left, right, power, etc */ static const struct gpio_keys_button rmt_gpio_keys_buttons[] = { [0] = { .type = EV_KEY, .code = KEY_POWER, .gpio = PIN_PWR_SW_N, .active_low = 1, .desc = "power button", }, [1] = { .type = EV_KEY, .code = KEY_LEFT, .gpio = PIN_PB_LEFT, .active_low = 1, .desc = "left button", }, [2] = { .type = EV_KEY, .code = KEY_RIGHT, .gpio = PIN_PB_RIGHT, .active_low = 1, .desc = "right button", }, }; static const struct gpio_keys_platform_data rmt_gpio_keys_data = { .nbuttons = ARRAY_SIZE(rmt_gpio_keys_buttons), .buttons = (void *) rmt_gpio_keys_buttons, }; static struct platform_device rmt_gpio_keys = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = (void *) 
&rmt_gpio_keys_data, } }; #ifdef CONFIG_BOARD_MRMT_RTC_I2C static struct i2c_board_info __initdata mrmt1_i2c_rtc = { I2C_BOARD_INFO("s35390a", 0x30), .irq = 0, }; #endif static void mrmt_power_off(void) { /* PWR_ON=0 will force power off */ gpio_set_value( PIN_PWR_ON, 0 ); } static int __init mrmt1_init(void) { gpio_set_value( PIN_PWR_ON, 1 ); /* Ensure PWR_ON is enabled */ pm_power_off = mrmt_power_off; /* Setup USARTS (other than console) */ at32_map_usart(2, 1, 0); /* USART 2: /dev/ttyS1, RMT1:DB9M */ at32_map_usart(3, 2, ATMEL_USART_RTS | ATMEL_USART_CTS); /* USART 3: /dev/ttyS2, RMT1:Wireless, w/ RTS/CTS */ at32_add_device_usart(1); at32_add_device_usart(2); /* Select GPIO Key pins */ at32_select_gpio( PIN_PWR_SW_N, AT32_GPIOF_DEGLITCH); at32_select_gpio( PIN_PB_LEFT, AT32_GPIOF_DEGLITCH); at32_select_gpio( PIN_PB_RIGHT, AT32_GPIOF_DEGLITCH); platform_device_register(&rmt_gpio_keys); #ifdef CONFIG_BOARD_MRMT_RTC_I2C i2c_register_board_info(0, &mrmt1_i2c_rtc, 1); #endif #ifndef CONFIG_BOARD_MRMT_LCD_DISABLE /* User "alternate" LCDC inferface on Port E & D */ /* NB: exclude LCDC_CC pin, as NGW100 reserves it for other use */ at32_add_device_lcdc(0, &rmt_lcdc_data, fbmem_start, fbmem_size, (ATMEL_LCDC_ALT_24BIT | ATMEL_LCDC_PE_DVAL ) ); #endif #ifdef CONFIG_BOARD_MRMT_AC97 at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH); #endif #ifdef CONFIG_BOARD_MRMT_ADS7846_TS /* Select the Touchscreen interrupt pin mode */ at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); irq_set_irq_type(AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING); at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info)); spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info)); #endif #ifdef CONFIG_BOARD_MRMT_UCB1400_TS /* Select the Touchscreen interrupt pin mode */ at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); platform_device_register(&rmt_ts_device); #endif 
at32_select_gpio( PIN_LCD_DISP, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_DISP, "LCD_DISP" ); gpio_direction_output( PIN_LCD_DISP, 0 ); /* LCD DISP */ #ifdef CONFIG_BOARD_MRMT_LCD_DISABLE /* Keep Backlight and DISP off */ at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_BL, "LCD_BL" ); gpio_direction_output( PIN_LCD_BL, 0 ); /* Backlight */ #else gpio_set_value( PIN_LCD_DISP, 1 ); /* DISP asserted first */ #ifdef CONFIG_BOARD_MRMT_BL_PWM /* Use PWM for Backlight controls */ at32_add_device_pwm(1 << PWM_CH_BL); platform_device_register(&rmt_pwm_led_dev); #else /* Backlight always on */ udelay( 1 ); at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_BL, "LCD_BL" ); gpio_direction_output( PIN_LCD_BL, 1 ); #endif #endif /* Make sure BT and Zigbee modules in reset */ at32_select_gpio( PIN_BT_RST, AT32_GPIOF_OUTPUT ); gpio_request( PIN_BT_RST, "BT_RST" ); gpio_direction_output( PIN_BT_RST, 1 ); /* BT Module in Reset */ at32_select_gpio( PIN_ZB_RST_N, AT32_GPIOF_OUTPUT ); gpio_request( PIN_ZB_RST_N, "ZB_RST_N" ); gpio_direction_output( PIN_ZB_RST_N, 0 ); /* XBee Module in Reset */ #ifdef CONFIG_BOARD_MRMT_WIRELESS_ZB udelay( 1000 ); /* Unreset the XBee Module */ gpio_set_value( PIN_ZB_RST_N, 1 ); #endif #ifdef CONFIG_BOARD_MRMT_WIRELESS_BT udelay( 1000 ); /* Unreset the BT Module */ gpio_set_value( PIN_BT_RST, 0 ); #endif return 0; } arch_initcall(mrmt1_init); static int __init mrmt1_early_init(void) { /* To maintain power-on signal in case boot loader did not already */ at32_select_gpio( PIN_PWR_ON, AT32_GPIOF_OUTPUT ); gpio_request( PIN_PWR_ON, "PIN_PWR_ON" ); gpio_direction_output( PIN_PWR_ON, 1 ); return 0; } core_initcall(mrmt1_early_init);
gpl-2.0
xsynergy510x/android_kernel_google_msm
drivers/net/arcnet/capmode.c
12487
7629
/* * Linux ARCnet driver - "cap mode" packet encapsulation. * It adds sequence numbers to packets for communicating between a user space * application and the driver. After a transmit it sends a packet with protocol * byte 0 back up to the userspace containing the sequence number of the packet * plus the transmit-status on the ArcNet. * * Written 2002-4 by Esben Nielsen, Vestas Wind Systems A/S * Derived from arc-rawmode.c by Avery Pennarun. * arc-rawmode was in turned based on skeleton.c, see below. * * ********************** * * The original copyright of skeleton.c was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * For more details, see drivers/net/arcnet.c * * ********************** */ #include <linux/module.h> #include <linux/gfp.h> #include <linux/init.h> #include <linux/if_arp.h> #include <net/arp.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/arcdevice.h> #define VERSION "arcnet: cap mode (`c') encapsulation support loaded.\n" /* packet receiver */ static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length) { struct arcnet_local *lp = netdev_priv(dev); struct sk_buff *skb; struct archdr *pkt = pkthdr; char *pktbuf, *pkthdrbuf; int ofs; BUGMSG(D_DURING, "it's a raw(cap) packet (length=%d)\n", length); if (length >= MinTU) ofs = 512 - length; else ofs = 256 - length; skb = alloc_skb(length + ARC_HDR_SIZE + sizeof(int), GFP_ATOMIC); if (skb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n"); dev->stats.rx_dropped++; return; } skb_put(skb, length + ARC_HDR_SIZE + sizeof(int)); skb->dev = dev; skb_reset_mac_header(skb); pkt = (struct archdr *)skb_mac_header(skb); skb_pull(skb, ARC_HDR_SIZE); /* 
up to sizeof(pkt->soft) has already been copied from the card */ /* squeeze in an int for the cap encapsulation */ /* use these variables to be sure we count in bytes, not in sizeof(struct archdr) */ pktbuf=(char*)pkt; pkthdrbuf=(char*)pkthdr; memcpy(pktbuf, pkthdrbuf, ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto)); memcpy(pktbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto)+sizeof(int), pkthdrbuf+ARC_HDR_SIZE+sizeof(pkt->soft.cap.proto), sizeof(struct archdr)-ARC_HDR_SIZE-sizeof(pkt->soft.cap.proto)); if (length > sizeof(pkt->soft)) lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft), pkt->soft.raw + sizeof(pkt->soft) + sizeof(int), length - sizeof(pkt->soft)); BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx"); skb->protocol = cpu_to_be16(ETH_P_ARCNET); netif_rx(skb); } /* * Create the ARCnet hard/soft headers for cap mode. * There aren't any soft headers in cap mode - not even the protocol id. */ static int build_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, uint8_t daddr) { int hdr_size = ARC_HDR_SIZE; struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); BUGMSG(D_PROTO, "Preparing header for cap packet %x.\n", *((int*)&pkt->soft.cap.cookie[0])); /* * Set the source hardware address. * * This is pretty pointless for most purposes, but it can help in * debugging. ARCnet does not allow us to change the source address in * the actual packet sent) */ pkt->hard.source = *dev->dev_addr; /* see linux/net/ethernet/eth.c to see where I got the following */ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { /* * FIXME: fill in the last byte of the dest ipaddr here to better * comply with RFC1051 in "noarp" mode. */ pkt->hard.dest = 0; return hdr_size; } /* otherwise, just fill it in and go! 
*/ pkt->hard.dest = daddr; return hdr_size; /* success */ } static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, int bufnum) { struct arcnet_local *lp = netdev_priv(dev); struct arc_hardware *hard = &pkt->hard; int ofs; /* hard header is not included in packet length */ length -= ARC_HDR_SIZE; /* And neither is the cookie field */ length -= sizeof(int); BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n", lp->next_tx, lp->cur_tx, bufnum); BUGMSG(D_PROTO, "Sending for cap packet %x.\n", *((int*)&pkt->soft.cap.cookie[0])); if (length > XMTU) { /* should never happen! other people already check for this. */ BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n", length, XMTU); length = XMTU; } if (length > MinTU) { hard->offset[0] = 0; hard->offset[1] = ofs = 512 - length; } else if (length > MTU) { hard->offset[0] = 0; hard->offset[1] = ofs = 512 - length - 3; } else hard->offset[0] = ofs = 256 - length; BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n", length,ofs); /* Copy the arcnet-header + the protocol byte down: */ lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE); lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft.cap.proto, sizeof(pkt->soft.cap.proto)); /* Skip the extra integer we have written into it as a cookie but write the rest of the message: */ lp->hw.copy_to_card(dev, bufnum, ofs+1, ((unsigned char*)&pkt->soft.cap.mes),length-1); lp->lastload_dest = hard->dest; return 1; /* done */ } static int ack_tx(struct net_device *dev, int acked) { struct arcnet_local *lp = netdev_priv(dev); struct sk_buff *ackskb; struct archdr *ackpkt; int length=sizeof(struct arc_cap); BUGMSG(D_DURING, "capmode: ack_tx: protocol: %x: result: %d\n", lp->outgoing.skb->protocol, acked); BUGLVL(D_SKB) arcnet_dump_skb(dev, lp->outgoing.skb, "ack_tx"); /* Now alloc a skb to send back up through the layers: */ ackskb = alloc_skb(length + ARC_HDR_SIZE , GFP_ATOMIC); if (ackskb == NULL) { BUGMSG(D_NORMAL, "Memory squeeze, can't acknowledge.\n"); goto 
free_outskb; } skb_put(ackskb, length + ARC_HDR_SIZE ); ackskb->dev = dev; skb_reset_mac_header(ackskb); ackpkt = (struct archdr *)skb_mac_header(ackskb); /* skb_pull(ackskb, ARC_HDR_SIZE); */ skb_copy_from_linear_data(lp->outgoing.skb, ackpkt, ARC_HDR_SIZE + sizeof(struct arc_cap)); ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ ackpkt->soft.cap.mes.ack=acked; BUGMSG(D_PROTO, "Ackknowledge for cap packet %x.\n", *((int*)&ackpkt->soft.cap.cookie[0])); ackskb->protocol = cpu_to_be16(ETH_P_ARCNET); BUGLVL(D_SKB) arcnet_dump_skb(dev, ackskb, "ack_tx_recv"); netif_rx(ackskb); free_outskb: dev_kfree_skb_irq(lp->outgoing.skb); lp->outgoing.proto = NULL; /* We are always finished when in this protocol */ return 0; } static struct ArcProto capmode_proto = { 'r', XMTU, 0, rx, build_header, prepare_tx, NULL, ack_tx }; static void arcnet_cap_init(void) { int count; for (count = 1; count <= 8; count++) if (arc_proto_map[count] == arc_proto_default) arc_proto_map[count] = &capmode_proto; /* for cap mode, we only set the bcast proto if there's no better one */ if (arc_bcast_proto == arc_proto_default) arc_bcast_proto = &capmode_proto; arc_proto_default = &capmode_proto; arc_raw_proto = &capmode_proto; } static int __init capmode_module_init(void) { printk(VERSION); arcnet_cap_init(); return 0; } static void __exit capmode_module_exit(void) { arcnet_unregister_proto(&capmode_proto); } module_init(capmode_module_init); module_exit(capmode_module_exit); MODULE_LICENSE("GPL");
gpl-2.0
tyeo098/MK908-Kernel-NAND
net/netfilter/xt_policy.c
13767
4843
/* IP tables module for matching IPsec policy * * Copyright (c) 2004,2005 Patrick McHardy, <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/init.h> #include <net/xfrm.h> #include <linux/netfilter.h> #include <linux/netfilter/xt_policy.h> #include <linux/netfilter/x_tables.h> MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("Xtables: IPsec policy match"); MODULE_LICENSE("GPL"); static inline bool xt_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *m, const union nf_inet_addr *a2, unsigned short family) { switch (family) { case NFPROTO_IPV4: return ((a1->ip ^ a2->ip) & m->ip) == 0; case NFPROTO_IPV6: return ipv6_masked_addr_cmp(&a1->in6, &m->in6, &a2->in6) == 0; } return false; } static bool match_xfrm_state(const struct xfrm_state *x, const struct xt_policy_elem *e, unsigned short family) { #define MATCH_ADDR(x,y,z) (!e->match.x || \ (xt_addr_cmp(&e->x, &e->y, (const union nf_inet_addr *)(z), family) \ ^ e->invert.x)) #define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x)) return MATCH_ADDR(saddr, smask, &x->props.saddr) && MATCH_ADDR(daddr, dmask, &x->id.daddr) && MATCH(proto, x->id.proto) && MATCH(mode, x->props.mode) && MATCH(spi, x->id.spi) && MATCH(reqid, x->props.reqid); } static int match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info, unsigned short family) { const struct xt_policy_elem *e; const struct sec_path *sp = skb->sp; int strict = info->flags & XT_POLICY_MATCH_STRICT; int i, pos; if (sp == NULL) return -1; if (strict && info->len != sp->len) return 0; for (i = sp->len - 1; i >= 0; i--) { pos = strict ? 
i - sp->len + 1 : 0; if (pos >= info->len) return 0; e = &info->pol[pos]; if (match_xfrm_state(sp->xvec[i], e, family)) { if (!strict) return 1; } else if (strict) return 0; } return strict ? 1 : 0; } static int match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info, unsigned short family) { const struct xt_policy_elem *e; const struct dst_entry *dst = skb_dst(skb); int strict = info->flags & XT_POLICY_MATCH_STRICT; int i, pos; if (dst->xfrm == NULL) return -1; for (i = 0; dst && dst->xfrm; dst = dst->child, i++) { pos = strict ? i : 0; if (pos >= info->len) return 0; e = &info->pol[pos]; if (match_xfrm_state(dst->xfrm, e, family)) { if (!strict) return 1; } else if (strict) return 0; } return strict ? i == info->len : 0; } static bool policy_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_policy_info *info = par->matchinfo; int ret; if (info->flags & XT_POLICY_MATCH_IN) ret = match_policy_in(skb, info, par->family); else ret = match_policy_out(skb, info, par->family); if (ret < 0) ret = info->flags & XT_POLICY_MATCH_NONE ? 
true : false; else if (info->flags & XT_POLICY_MATCH_NONE) ret = false; return ret; } static int policy_mt_check(const struct xt_mtchk_param *par) { const struct xt_policy_info *info = par->matchinfo; if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) { pr_info("neither incoming nor outgoing policy selected\n"); return -EINVAL; } if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) { pr_info("output policy not valid in PREROUTING and INPUT\n"); return -EINVAL; } if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) { pr_info("input policy not valid in POSTROUTING and OUTPUT\n"); return -EINVAL; } if (info->len > XT_POLICY_MAX_ELEM) { pr_info("too many policy elements\n"); return -EINVAL; } return 0; } static struct xt_match policy_mt_reg[] __read_mostly = { { .name = "policy", .family = NFPROTO_IPV4, .checkentry = policy_mt_check, .match = policy_mt, .matchsize = sizeof(struct xt_policy_info), .me = THIS_MODULE, }, { .name = "policy", .family = NFPROTO_IPV6, .checkentry = policy_mt_check, .match = policy_mt, .matchsize = sizeof(struct xt_policy_info), .me = THIS_MODULE, }, }; static int __init policy_mt_init(void) { return xt_register_matches(policy_mt_reg, ARRAY_SIZE(policy_mt_reg)); } static void __exit policy_mt_exit(void) { xt_unregister_matches(policy_mt_reg, ARRAY_SIZE(policy_mt_reg)); } module_init(policy_mt_init); module_exit(policy_mt_exit); MODULE_ALIAS("ipt_policy"); MODULE_ALIAS("ip6t_policy");
gpl-2.0
Sunfong/sunfong-nexus-s
drivers/scsi/qla2xxx/qla_isr.c
200
67649
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2008 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include <linux/delay.h> #include <linux/slab.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_bsg_fc.h> #include <scsi/scsi_eh.h> static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. * @irq: * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. */ irqreturn_t qla2100_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct device_reg_2xxx __iomem *reg; int status; unsigned long iter; uint16_t hccr; uint16_t mb[4]; struct rsp_que *rsp; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer\n", __func__); return (IRQ_NONE); } ha = rsp->hw; reg = &ha->iobase->isp; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(&reg->hccr); if (hccr & HCCR_RISC_PAUSE) { if (pci_channel_offline(ha->pdev)) break; /* * Issue a "HARD" reset in order for the RISC interrupt * bit to be cleared. Schedule a big hammmer to get * out of the RISC PAUSED state. 
*/ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); RD_REG_WORD(&reg->hccr); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) break; if (RD_REG_WORD(&reg->semaphore) & BIT_0) { WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); /* Get mailbox data. */ mb[0] = RD_MAILBOX_REG(ha, reg, 0); if (mb[0] > 0x3fff && mb[0] < 0x8000) { qla2x00_mbx_completion(vha, mb[0]); status |= MBX_INTERRUPT; } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla2x00_async_event(vha, rsp, mb); } else { /*EMPTY*/ DEBUG2(printk("scsi(%ld): Unrecognized " "interrupt type (%d).\n", vha->host_no, mb[0])); } /* Release mailbox registers. */ WRT_REG_WORD(&reg->semaphore, 0); RD_REG_WORD(&reg->semaphore); } else { qla2x00_process_response_queue(rsp); WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); } } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); complete(&ha->mbx_intr_comp); } return (IRQ_HANDLED); } /** * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
*/ irqreturn_t qla2300_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct device_reg_2xxx __iomem *reg; int status; unsigned long iter; uint32_t stat; uint16_t hccr; uint16_t mb[4]; struct rsp_que *rsp; struct qla_hw_data *ha; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer\n", __func__); return (IRQ_NONE); } ha = rsp->hw; reg = &ha->iobase->isp; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_WORD(&reg->hccr); if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) qla_printk(KERN_INFO, ha, "Parity error -- " "HCCR=%x, Dumping firmware!\n", hccr); else qla_printk(KERN_INFO, ha, "RISC paused -- " "HCCR=%x, Dumping firmware!\n", hccr); /* * Issue a "HARD" reset in order for the RISC * interrupt bit to be cleared. Schedule a big * hammmer to get out of the RISC PAUSED state. */ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); RD_REG_WORD(&reg->hccr); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSR_RISC_INT) == 0) break; switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla2x00_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; /* Release mailbox registers. 
*/ WRT_REG_WORD(&reg->semaphore, 0); break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla2x00_process_response_queue(rsp); break; case 0x15: mb[0] = MBA_CMPLT_1_16BIT; mb[1] = MSW(stat); qla2x00_async_event(vha, rsp, mb); break; case 0x16: mb[0] = MBA_SCSI_COMPLETION; mb[1] = MSW(stat); mb[2] = RD_MAILBOX_REG(ha, reg, 2); qla2x00_async_event(vha, rsp, mb); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " "(%d).\n", vha->host_no, stat & 0xff)); break; } WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD_RELAXED(&reg->hccr); } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); complete(&ha->mbx_intr_comp); } return (IRQ_HANDLED); } /** * qla2x00_mbx_completion() - Process mailbox command completions. * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ static void qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); for (cnt = 1; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); if (cnt == 4 || cnt == 5) ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); else ha->mailbox_out[cnt] = RD_REG_WORD(wptr); wptr++; } if (ha->mcp) { DEBUG3(printk("%s(%ld): Got mailbox completion. 
cmd=%x.\n", __func__, vha->host_no, ha->mcp->mb[0])); } else { DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", __func__, vha->host_no)); } } static void qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) { static char *event[] = { "Complete", "Request Notification", "Time Extension" }; int rval; struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; uint16_t __iomem *wptr; uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; /* Seed data -- mailbox1 -> mailbox7. */ wptr = (uint16_t __iomem *)&reg24->mailbox1; for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) mb[cnt] = RD_REG_WORD(wptr); DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no, event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6])); /* Acknowledgement needed? [Notify && non-zero timeout]. */ timeout = (descr >> 8) & 0xf; if (aen != MBA_IDC_NOTIFY || !timeout) return; DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- " "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout)); rval = qla2x00_post_idc_ack_work(vha, mb); if (rval != QLA_SUCCESS) qla_printk(KERN_WARNING, vha->hw, "IDC failed to post ACK.\n"); } /** * qla2x00_async_event() - Process aynchronous events. * @ha: SCSI driver HA context * @mb: Mailbox registers (0 - 3) */ void qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) { #define LS_UNKNOWN 2 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; char *link_speed; uint16_t handle_cnt; uint16_t cnt, mbx; uint32_t handles[5]; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; uint32_t rscn_entry, host_pid; uint8_t rscn_queue_index; unsigned long flags; /* Setup to process RIO completion. 
*/ handle_cnt = 0; if (IS_QLA8XXX_TYPE(ha)) goto skip_rio; switch (mb[0]) { case MBA_SCSI_COMPLETION: handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handle_cnt = 1; break; case MBA_CMPLT_1_16BIT: handles[0] = mb[1]; handle_cnt = 1; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_3_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handle_cnt = 3; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_4_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handle_cnt = 4; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_5_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); handle_cnt = 5; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_32BIT: handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handles[1] = le32_to_cpu( ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | RD_MAILBOX_REG(ha, reg, 6)); handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; default: break; } skip_rio: switch (mb[0]) { case MBA_SCSI_COMPLETION: /* Fast Post */ if (!vha->flags.online) break; for (cnt = 0; cnt < handle_cnt; cnt++) qla2x00_process_completed_request(vha, rsp->req, handles[cnt]); break; case MBA_RESET: /* Reset */ DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", vha->host_no)); set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); break; case MBA_SYSTEM_ERR: /* System Error */ mbx = IS_QLA81XX(ha) ? 
RD_REG_WORD(&reg24->mailbox7) : 0; qla_printk(KERN_INFO, ha, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); ha->isp_ops->fw_dump(vha, 1); if (IS_FWI2_CAPABLE(ha)) { if (mb[1] == 0 && mb[2] == 0) { qla_printk(KERN_ERR, ha, "Unrecoverable Hardware Error: adapter " "marked OFFLINE!\n"); vha->flags.online = 0; } else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } else if (mb[1] == 0) { qla_printk(KERN_INFO, ha, "Unrecoverable Hardware Error: adapter marked " "OFFLINE!\n"); vha->flags.online = 0; } else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n", vha->host_no, mb[1])); qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error (%x).\n", mb[1]); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", vha->host_no)); qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", vha->host_no)); break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, mb[1])); qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); break; case MBA_LOOP_UP: /* Loop Up Event */ if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 
link_speed = link_speeds[0]; ha->link_data_rate = PORT_SPEED_1GB; } else { link_speed = link_speeds[LS_UNKNOWN]; if (mb[1] < 5) link_speed = link_speeds[mb[1]]; else if (mb[1] == 0x13) link_speed = link_speeds[5]; ha->link_data_rate = mb[1]; } DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", vha->host_no, link_speed)); qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", link_speed); vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); break; case MBA_LOOP_DOWN: /* Loop Down Event */ mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], mbx)); qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3], mbx); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } vha->flags.management_server_logged_in = 0; ha->link_data_rate = PORT_SPEED_UNKNOWN; qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); break; case MBA_LIP_RESET: /* LIP reset occurred */ DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", vha->host_no, mb[1])); qla_printk(KERN_INFO, ha, "LIP reset occurred (%x).\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->operating_mode = LOOP; vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); break; /* case MBA_DCBX_COMPLETE: */ case 
MBA_POINT_TO_POINT: /* Point-to-Point */ if (IS_QLA2100(ha)) break; if (IS_QLA8XXX_TYPE(ha)) DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); else DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " "received.\n", vha->host_no)); /* * Until there's a transition from loop down to loop up, treat * this as loop down only. */ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); ha->flags.gpsc_supported = 1; vha->flags.management_server_logged_in = 0; break; case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ if (IS_QLA2100(ha)) break; DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " "received.\n", vha->host_no)); qla_printk(KERN_INFO, ha, "Configuration change detected: value=%x.\n", mb[1]); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); } set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); break; case MBA_PORT_UPDATE: /* Port database update */ /* * Handle only global and vn-port update events * * Relevant inputs: * mb[1] = N_Port handle of changed port * OR 0xffff for global event * mb[2] = New login state * 7 = Port logged out * mb[3] = LSB is vp_idx, 0xff = all vps * * Skip processing if: * Event is 
global, vp_idx is NOT all vps, * vp_idx does not match * Event is not global, vp_idx does not match */ if (IS_QLA2XXX_MIDTYPE(ha) && ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) break; /* Global event -- port logout or port unavailable. */ if (mb[1] == 0xffff && mb[2] == 0x7) { DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", vha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): Port unavailable %04x %04x %04x.\n", vha->host_no, mb[1], mb[2], mb[3])); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); vha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(vha, 1); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); qla2x00_mark_all_devices_lost(vha, 1); } vha->flags.management_server_logged_in = 0; ha->link_data_rate = PORT_SPEED_UNKNOWN; break; } /* * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET * event etc. earlier indicating loop is down) then process * it. Otherwise ignore it and Wait for RSCN to come in. */ atomic_set(&vha->loop_down_timer, 0); if (atomic_read(&vha->loop_state) != LOOP_DOWN && atomic_read(&vha->loop_state) != LOOP_DEAD) { DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); break; } DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", vha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): Port database changed %04x %04x %04x.\n", vha->host_no, mb[1], mb[2], mb[3])); /* * Mark all devices as missing so we will login again. 
*/ atomic_set(&vha->loop_state, LOOP_UP); qla2x00_mark_all_devices_lost(vha, 1); vha->flags.rscn_queue_overflow = 1; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); break; case MBA_RSCN_UPDATE: /* State Change Registration */ /* Check if the Vport has issued a SCR */ if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) break; /* Only handle SCNs for our Vport index. */ if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) break; DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", vha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", vha->host_no, mb[1], mb[2], mb[3])); rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) | vha->d_id.b.al_pa; if (rscn_entry == host_pid) { DEBUG(printk(KERN_INFO "scsi(%ld): Ignoring RSCN update to local host " "port ID (%06x)\n", vha->host_no, host_pid)); break; } /* Ignore reserved bits from RSCN-payload. 
*/ rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; rscn_queue_index = vha->rscn_in_ptr + 1; if (rscn_queue_index == MAX_RSCN_COUNT) rscn_queue_index = 0; if (rscn_queue_index != vha->rscn_out_ptr) { vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry; vha->rscn_in_ptr = rscn_queue_index; } else { vha->flags.rscn_queue_overflow = 1; } atomic_set(&vha->loop_state, LOOP_UPDATE); atomic_set(&vha->loop_down_timer, 0); vha->flags.management_server_logged_in = 0; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(RSCN_UPDATE, &vha->dpc_flags); qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); break; /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n", vha->host_no)); if (IS_FWI2_CAPABLE(ha)) qla24xx_process_response_queue(vha, rsp); else qla2x00_process_response_queue(rsp); break; case MBA_DISCARD_RND_FRAME: DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); break; case MBA_TRACE_NOTIFICATION: DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", vha->host_no, mb[1], mb[2])); break; case MBA_ISP84XX_ALERT: DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); spin_lock_irqsave(&ha->cs84xx->access_lock, flags); switch (mb[1]) { case A84_PANIC_RECOVERY: qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " "%04x %04x\n", mb[2], mb[3]); break; case A84_OP_LOGIN_COMPLETE: ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" "firmware version %x\n", ha->cs84xx->op_fw_version)); break; case A84_DIAG_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" "diagnostic firmware version %x\n", ha->cs84xx->diag_fw_version)); break; case A84_GOLD_LOGIN_COMPLETE: ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; ha->cs84xx->fw_update = 1; DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " "firmware 
version %x\n", ha->cs84xx->gold_fw_version)); break; default: qla_printk(KERN_ERR, ha, "Alert 84xx: Invalid Alert %04x %04x %04x\n", mb[1], mb[2], mb[3]); } spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); break; case MBA_DCBX_START: DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); break; case MBA_DCBX_PARAM_UPDATE: DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- " "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); break; case MBA_FCF_CONF_ERR: DEBUG2(printk("scsi(%ld): FCF Configuration Error -- " "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); break; case MBA_IDC_COMPLETE: case MBA_IDC_NOTIFY: case MBA_IDC_TIME_EXT: qla81xx_idc_event(vha, mb[0], mb[1]); break; } if (!vha->vp_idx && ha->num_vhosts) qla2x00_alert_all_vps(rsp, mb); } /** * qla2x00_process_completed_request() - Process a Fast Post response. * @ha: SCSI driver HA context * @index: SRB index */ static void qla2x00_process_completed_request(struct scsi_qla_host *vha, struct req_que *req, uint32_t index) { srb_t *sp; struct qla_hw_data *ha = vha->hw; /* Validate handle. */ if (index >= MAX_OUTSTANDING_COMMANDS) { DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", vha->host_no, index)); qla_printk(KERN_WARNING, ha, "Invalid SCSI completion handle %d.\n", index); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); return; } sp = req->outstanding_cmds[index]; if (sp) { /* Free outstanding command slot. 
 */
	req->outstanding_cmds[index] = NULL;

	/* Save ISP completion status */
	sp->cmd->result = DID_OK << 16;
	qla2x00_sp_compl(ha, sp);
	} else {
		DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
			" handle(0x%x)\n", vha->host_no, req->id, index));
		qla_printk(KERN_WARNING, ha,
			"Invalid ISP SCSI completion handle\n");

		/*
		 * A completion for a handle we are not tracking means the
		 * driver and firmware disagree -- force a full ISP abort.
		 */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

/*
 * qla2x00_get_sp_from_handle() - Map an IOCB completion handle to its SRB.
 * @vha: SCSI host context
 * @func: caller name, used only for log messages
 * @req: request queue the handle indexes into
 * @iocb: response-queue entry carrying the handle
 *
 * On success the outstanding-command slot is cleared (ownership of the SRB
 * transfers to the caller) and the SRB is returned.  Returns NULL (or the
 * NULL slot value) on an out-of-range, timed-out, or mismatched handle; an
 * out-of-range handle additionally schedules an ISP abort.
 */
static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		qla_printk(KERN_WARNING, ha,
		    "%s: Invalid completion handle (%x).\n", func, index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		/* Slot already empty -- command likely timed out earlier. */
		qla_printk(KERN_WARNING, ha,
		    "%s: Invalid completion handle (%x) -- timed-out.\n",
		    func, index);
		return sp;
	}
	if (sp->handle != index) {
		qla_printk(KERN_WARNING, ha,
		    "%s: SRB handle (%x) mismatch %x.\n", func,
		    sp->handle, index);
		return NULL;
	}

	/* Claim the slot; the caller now owns the SRB. */
	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

/*
 * qla2x00_mbx_iocb_entry() - Complete an asynchronous mailbox IOCB
 * (login/logout style operation) for pre-24xx adapters.
 * @vha: SCSI host context
 * @req: request queue the IOCB handle belongs to
 * @mbx: mailbox-entry response from the firmware
 *
 * Decodes the mailbox status into lio->u.logio.data[] and invokes the
 * iocb completion callback.
 */
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Assume failure; overwritten below when the mailbox succeeded. */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0; if (mbx->entry_status) { DEBUG2(printk(KERN_WARNING "scsi(%ld:%x): Async-%s error entry - entry-status=%x " "status=%x state-flag=%x status-flags=%x.\n", fcport->vha->host_no, sp->handle, type, mbx->entry_status, le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), le16_to_cpu(mbx->status_flags))); DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx))); goto logio_done; } status = le16_to_cpu(mbx->status); if (status == 0x30 && ctx->type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) status = 0; if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { DEBUG2(printk(KERN_DEBUG "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n", fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->mb1))); data[0] = MBS_COMMAND_COMPLETE; if (ctx->type == SRB_LOGIN_CMD) { fcport->port_type = FCT_TARGET; if (le16_to_cpu(mbx->mb1) & BIT_0) fcport->port_type = FCT_INITIATOR; if (le16_to_cpu(mbx->mb1) & BIT_1) fcport->flags |= FCF_FCP2_DEVICE; } goto logio_done; } data[0] = le16_to_cpu(mbx->mb0); switch (data[0]) { case MBS_PORT_ID_USED: data[1] = le16_to_cpu(mbx->mb1); break; case MBS_LOOP_ID_USED: break; default: data[0] = MBS_COMMAND_ERROR; break; } DEBUG2(printk(KERN_WARNING "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x " "mb6=%x mb7=%x.\n", fcport->vha->host_no, sp->handle, type, status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), le16_to_cpu(mbx->mb7))); logio_done: lio->done(sp); } static void qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, struct sts_entry_24xx *pkt, int iocb_type) { const char func[] = "ELS_CT_IOCB"; const char *type; struct qla_hw_data *ha = vha->hw; srb_t *sp; struct srb_ctx *sp_bsg; struct fc_bsg_job *bsg_job; uint16_t comp_status; uint32_t fw_status[3]; uint8_t* fw_sts_ptr; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; sp_bsg = sp->ctx; bsg_job = sp_bsg->u.bsg_job; type = NULL; switch 
(sp_bsg->type) { case SRB_ELS_CMD_RPT: case SRB_ELS_CMD_HST: type = "els"; break; case SRB_CT_CMD: type = "ct pass-through"; break; default: qla_printk(KERN_WARNING, ha, "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp, sp_bsg->type); return; } comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT * fc payload to the caller */ bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); if (comp_status != CS_COMPLETE) { if (comp_status == CS_DATA_UNDERRUN) { bsg_job->reply->result = DID_OK << 16; bsg_job->reply->reply_payload_rcv_len = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2], le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count))); fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } else { DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x.\n", vha->host_no, sp->handle, type, comp_status, le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1), le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2))); bsg_job->reply->result = DID_ERROR << 16; bsg_job->reply->reply_payload_rcv_len = 0; fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); } DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt))); } else { 
bsg_job->reply->result = DID_OK << 16;; bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; bsg_job->reply_len = 0; } dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if ((sp_bsg->type == SRB_ELS_CMD_HST) || (sp_bsg->type == SRB_CT_CMD)) kfree(sp->fcport); kfree(sp->ctx); mempool_free(sp, ha->srb_mempool); bsg_job->job_done(bsg_job); } static void qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, struct logio_entry_24xx *logio) { const char func[] = "LOGIO-IOCB"; const char *type; fc_port_t *fcport; srb_t *sp; struct srb_iocb *lio; struct srb_ctx *ctx; uint16_t *data; uint32_t iop[2]; sp = qla2x00_get_sp_from_handle(vha, func, req, logio); if (!sp) return; ctx = sp->ctx; lio = ctx->u.iocb_cmd; type = ctx->name; fcport = sp->fcport; data = lio->u.logio.data; data[0] = MBS_COMMAND_ERROR; data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
QLA_LOGIO_LOGIN_RETRIED : 0; if (logio->entry_status) { DEBUG2(printk(KERN_WARNING "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n", fcport->vha->host_no, sp->handle, type, logio->entry_status)); DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio))); goto logio_done; } if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { DEBUG2(printk(KERN_DEBUG "scsi(%ld:%x): Async-%s complete - iop0=%x.\n", fcport->vha->host_no, sp->handle, type, le32_to_cpu(logio->io_parameter[0]))); data[0] = MBS_COMMAND_COMPLETE; if (ctx->type != SRB_LOGIN_CMD) goto logio_done; iop[0] = le32_to_cpu(logio->io_parameter[0]); if (iop[0] & BIT_4) { fcport->port_type = FCT_TARGET; if (iop[0] & BIT_8) fcport->flags |= FCF_FCP2_DEVICE; } else if (iop[0] & BIT_5) fcport->port_type = FCT_INITIATOR; if (logio->io_parameter[7] || logio->io_parameter[8]) fcport->supported_classes |= FC_COS_CLASS2; if (logio->io_parameter[9] || logio->io_parameter[10]) fcport->supported_classes |= FC_COS_CLASS3; goto logio_done; } iop[0] = le32_to_cpu(logio->io_parameter[0]); iop[1] = le32_to_cpu(logio->io_parameter[1]); switch (iop[0]) { case LSC_SCODE_PORTID_USED: data[0] = MBS_PORT_ID_USED; data[1] = LSW(iop[1]); break; case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; case LSC_SCODE_CMD_FAILED: if ((iop[1] & 0xff) == 0x05) { data[0] = MBS_NOT_LOGGED_IN; break; } /* Fall through. 
 */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	DEBUG2(printk(KERN_WARNING
	    "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
	    fcport->vha->host_no, sp->handle, type,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1])));

logio_done:
	lio->done(sp);
}

/*
 * qla24xx_tm_iocb_entry() - Complete an asynchronous Task Management IOCB.
 * @vha: SCSI host context
 * @req: request queue the IOCB handle belongs to
 * @tsk: task-management IOCB response from the firmware
 *
 * Validates the returned status IOCB layer by layer (entry status,
 * completion status, response-info presence/length, then the FCP response
 * code in data[3]); any failure records a non-zero value in
 * iocb->u.tmf.data before the completion callback runs.
 */
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct srb_ctx *ctx;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	ctx = sp->ctx;
	iocb = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		DEBUG2(printk(KERN_WARNING
		    "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
		    fcport->vha->host_no, sp->handle, type,
		    sts->entry_status));
	} else if (sts->comp_status !=
	    __constant_cpu_to_le16(CS_COMPLETE)) {
		DEBUG2(printk(KERN_WARNING
		    "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
		    fcport->vha->host_no, sp->handle, type,
		    sts->comp_status));
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		DEBUG2(printk(KERN_WARNING
		    "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
		    fcport->vha->host_no, sp->handle, type,
		    sts->scsi_status));
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		DEBUG2(printk(KERN_WARNING
		    "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
		    fcport->vha->host_no, sp->handle, type,
		    sts->rsp_data_len));
	} else if (sts->data[3]) {
		/* Non-zero FCP response code -- the TMF itself failed. */
		DEBUG2(printk(KERN_WARNING
		    "scsi(%ld:%x): Async-%s error - response(%x).\n",
		    fcport->vha->host_no, sp->handle, type,
		    sts->data[3]));
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
	}

	iocb->done(sp);
}

/*
 * qla24xx_marker_iocb_entry() - Complete an asynchronous Marker IOCB,
 * flagging any entry-status error in iocb->u.marker.data.
 */
static void
qla24xx_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mrk_entry_24xx *mrk)
{
	const char
func[] = "MRK-IOCB"; const char *type; fc_port_t *fcport; srb_t *sp; struct srb_iocb *iocb; struct srb_ctx *ctx; struct sts_entry_24xx *sts = (struct sts_entry_24xx *)mrk; sp = qla2x00_get_sp_from_handle(vha, func, req, mrk); if (!sp) return; ctx = sp->ctx; iocb = ctx->u.iocb_cmd; type = ctx->name; fcport = sp->fcport; if (sts->entry_status) { iocb->u.marker.data = 1; DEBUG2(printk(KERN_WARNING "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n", fcport->vha->host_no, sp->handle, type, sts->entry_status)); DEBUG2(qla2x00_dump_buffer((uint8_t *)mrk, sizeof(*sts))); } iocb->done(sp); } /** * qla2x00_process_response_queue() - Process response queue entries. * @ha: SCSI driver HA context */ void qla2x00_process_response_queue(struct rsp_que *rsp) { struct scsi_qla_host *vha; struct qla_hw_data *ha = rsp->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; sts_entry_t *pkt; uint16_t handle_cnt; uint16_t cnt; vha = pci_get_drvdata(ha->pdev); if (!vha->flags.online) return; while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (sts_entry_t *)rsp->ring_ptr; rsp->ring_index++; if (rsp->ring_index == rsp->length) { rsp->ring_index = 0; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr++; } if (pkt->entry_status != 0) { DEBUG3(printk(KERN_INFO "scsi(%ld): Process error entry.\n", vha->host_no)); qla2x00_error_entry(vha, rsp, pkt); ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); continue; } switch (pkt->entry_type) { case STATUS_TYPE: qla2x00_status_entry(vha, rsp, pkt); break; case STATUS_TYPE_21: handle_cnt = ((sts21_entry_t *)pkt)->handle_count; for (cnt = 0; cnt < handle_cnt; cnt++) { qla2x00_process_completed_request(vha, rsp->req, ((sts21_entry_t *)pkt)->handle[cnt]); } break; case STATUS_TYPE_22: handle_cnt = ((sts22_entry_t *)pkt)->handle_count; for (cnt = 0; cnt < handle_cnt; cnt++) { qla2x00_process_completed_request(vha, rsp->req, ((sts22_entry_t *)pkt)->handle[cnt]); } break; case STATUS_CONT_TYPE: qla2x00_status_cont_entry(rsp, 
			    (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    vha->host_no, pkt->entry_type,
			    pkt->entry_status));
			break;
		}
		/* Hand the entry back to the firmware. */
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

/*
 * qla2x00_handle_sense() - Copy sense data from a status IOCB into the
 * midlayer's sense buffer.
 * @sp: SRB whose command received a CHECK CONDITION
 * @sense_data: sense bytes carried in the status IOCB
 * @sense_len: total sense length reported by the firmware
 * @rsp: response queue (tracks the SRB awaiting continuation entries)
 *
 * At most 32 sense bytes fit in the status IOCB itself; if more were
 * reported, rsp->status_srb is set so the remainder can be collected from
 * subsequent status-continuation entries.
 */
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
    struct rsp_que *rsp)
{
	struct scsi_cmnd *cp = sp->cmd;

	/* Never exceed the midlayer's sense buffer. */
	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > 32)
		sense_len = 32;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
	if (sp->request_sense_length != 0)
		/* More sense pending -- expect status-continuation IOCBs. */
		rsp->status_srb = sp;

	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
	    "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
	    cp->device->channel, cp->device->id, cp->device->lun, cp,
	    cp->serial_number));
	if (sense_len)
		DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
}

/* DIF protection-information tuple as reported by the firmware. */
struct scsi_dif_tuple {
	__be16 guard;		/* Checksum */
	__be16 app_tag;		/* APPL identifier */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline void
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_cmnd *cmd = sp->cmd;
	/* Expected (ep) and actual (ap) DIF tuples from the status IOCB. */
	struct scsi_dif_tuple *ep =
	    (struct scsi_dif_tuple *)&sts24->data[20];
	struct scsi_dif_tuple *ap =
	    (struct scsi_dif_tuple *)&sts24->data[12];
	uint32_t e_ref_tag, a_ref_tag;
	uint16_t e_app_tag, a_app_tag;
	uint16_t e_guard, a_guard;

	e_ref_tag = be32_to_cpu(ep->ref_tag);
	a_ref_tag = be32_to_cpu(ap->ref_tag);
	e_app_tag = be16_to_cpu(ep->app_tag);
	a_app_tag = be16_to_cpu(ap->app_tag);
	e_guard = be16_to_cpu(ep->guard);
	a_guard = be16_to_cpu(ap->guard);

	DEBUG18(printk(KERN_DEBUG
	    "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));

	DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard));

	/* check guard */
	if (e_guard != a_guard) {
		/* ASC/ASCQ 0x10/0x1: LOGICAL BLOCK GUARD CHECK FAILED. */
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		/*
		 * NOTE(review): the SAM status is OR'd in shifted left by
		 * one; confirm this matches the midlayer's expected
		 * status-byte encoding for this kernel version.
		 */
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		/* ASC/ASCQ 0x10/0x2: LOGICAL BLOCK APPLICATION TAG CHECK FAILED. */
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		/* ASC/ASCQ 0x10/0x3: LOGICAL BLOCK REFERENCE TAG CHECK FAILED. */
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return;
	}
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
* @ha: SCSI driver HA context * @pkt: Entry pointer */ static void qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) { srb_t *sp; fc_port_t *fcport; struct scsi_cmnd *cp; sts_entry_t *sts; struct sts_entry_24xx *sts24; uint16_t comp_status; uint16_t scsi_status; uint16_t ox_id; uint8_t lscsi_status; int32_t resid; uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; uint8_t *rsp_info, *sense_data; struct qla_hw_data *ha = vha->hw; uint32_t handle; uint16_t que; struct req_que *req; int logit = 1; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; if (IS_FWI2_CAPABLE(ha)) { comp_status = le16_to_cpu(sts24->comp_status); scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; } else { comp_status = le16_to_cpu(sts->comp_status); scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } handle = (uint32_t) LSW(sts->handle); que = MSW(sts->handle); req = ha->req_q_map[que]; /* Fast path completion. */ if (comp_status == CS_COMPLETE && scsi_status == 0) { qla2x00_process_completed_request(vha, req, handle); return; } /* Validate handle. 
*/ if (handle < MAX_OUTSTANDING_COMMANDS) { sp = req->outstanding_cmds[handle]; req->outstanding_cmds[handle] = NULL; } else sp = NULL; if (sp == NULL) { qla_printk(KERN_WARNING, ha, "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no, sts->handle); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); return; } cp = sp->cmd; if (cp == NULL) { qla_printk(KERN_WARNING, ha, "scsi(%ld): Command already returned (0x%x/%p).\n", vha->host_no, sts->handle, sp); return; } lscsi_status = scsi_status & STATUS_MASK; fcport = sp->fcport; ox_id = 0; sense_len = rsp_info_len = resid_len = fw_resid_len = 0; if (IS_FWI2_CAPABLE(ha)) { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le32_to_cpu(sts24->sense_len); if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) rsp_info_len = le32_to_cpu(sts24->rsp_data_len); if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) resid_len = le32_to_cpu(sts24->rsp_residual_count); if (comp_status == CS_DATA_UNDERRUN) fw_resid_len = le32_to_cpu(sts24->residual_len); rsp_info = sts24->data; sense_data = sts24->data; host_to_fcp_swap(sts24->data, sizeof(sts24->data)); ox_id = le16_to_cpu(sts24->ox_id); } else { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le16_to_cpu(sts->req_sense_length); if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) rsp_info_len = le16_to_cpu(sts->rsp_info_len); resid_len = le32_to_cpu(sts->residual_length); rsp_info = sts->rsp_info; sense_data = sts->req_sense_data; } /* Check for any FCP transport errors. */ if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { /* Sense data lies beyond any FCP RESPONSE data. */ if (IS_FWI2_CAPABLE(ha)) sense_data += rsp_info_len; if (rsp_info_len > 3 && rsp_info[3]) { DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): FCP I/O protocol failure " "(0x%x/0x%x).\n", vha->host_no, cp->device->id, cp->device->lun, rsp_info_len, rsp_info[3])); cp->result = DID_BUS_BUSY << 16; goto out; } } /* Check for overrun. 
*/ if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && scsi_status & SS_RESIDUAL_OVER) comp_status = CS_DATA_OVERRUN; /* * Based on Host and scsi status generate status code for Linux */ switch (comp_status) { case CS_COMPLETE: case CS_QUEUE_FULL: if (scsi_status == 0) { cp->result = DID_OK << 16; break; } if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { resid = resid_len; scsi_set_resid(cp, resid); if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): Mid-layer underflow " "detected (0x%x of 0x%x bytes).\n", vha->host_no, cp->device->id, cp->device->lun, resid, scsi_bufflen(cp)); cp->result = DID_ERROR << 16; break; } } cp->result = DID_OK << 16 | lscsi_status; if (lscsi_status == SAM_STAT_TASK_SET_FULL) { DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) QUEUE FULL detected.\n", vha->host_no, cp->device->id, cp->device->lun)); break; } logit = 0; if (lscsi_status != SS_CHECK_CONDITION) break; memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (!(scsi_status & SS_SENSE_LEN_VALID)) break; qla2x00_handle_sense(sp, sense_data, sense_len, rsp); break; case CS_DATA_UNDERRUN: /* Use F/W calculated residual length. */ resid = IS_FWI2_CAPABLE(ha) ? 
fw_resid_len : resid_len; scsi_set_resid(cp, resid); if (scsi_status & SS_RESIDUAL_UNDER) { if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) Dropped frame(s) detected " "(0x%x of 0x%x bytes).\n", vha->host_no, cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); cp->result = DID_ERROR << 16 | lscsi_status; break; } if (!lscsi_status && ((unsigned)(scsi_bufflen(cp) - resid) < cp->underflow)) { qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): Mid-layer underflow " "detected (0x%x of 0x%x bytes).\n", vha->host_no, cp->device->id, cp->device->lun, resid, scsi_bufflen(cp)); cp->result = DID_ERROR << 16; break; } } else if (!lscsi_status) { DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " "of 0x%x bytes).\n", vha->host_no, cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); cp->result = DID_ERROR << 16; break; } cp->result = DID_OK << 16 | lscsi_status; logit = 0; /* * Check to see if SCSI Status is non zero. If so report SCSI * Status. */ if (lscsi_status != 0) { if (lscsi_status == SAM_STAT_TASK_SET_FULL) { DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) QUEUE FULL detected.\n", vha->host_no, cp->device->id, cp->device->lun)); logit = 1; break; } if (lscsi_status != SS_CHECK_CONDITION) break; memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (!(scsi_status & SS_SENSE_LEN_VALID)) break; qla2x00_handle_sense(sp, sense_data, sense_len, rsp); } break; case CS_PORT_LOGGED_OUT: case CS_PORT_CONFIG_CHG: case CS_PORT_BUSY: case CS_INCOMPLETE: case CS_PORT_UNAVAILABLE: case CS_TIMEOUT: /* * We are going to have the fc class block the rport * while we try to recover so instruct the mid layer * to requeue until the class decides how to handle this. 
		 */
		cp->result = DID_TRANSPORT_DISRUPTED << 16;
		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
		    vha->host_no, cp->device->id, cp->device->lun,
		    atomic_read(&fcport->state)));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_RESET:
	case CS_ABORTED:
		cp->result = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		qla2x00_handle_dif_error(sp, sts24);
		break;

	default:
		cp->result = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
		    "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
		    cp->device->id, cp->device->lun, comp_status, scsi_status,
		    cp->result, ox_id, cp->serial_number, cp->cmnd[0],
		    cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len));

	/* Defer completion while sense continuations are still expected. */
	if (rsp->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	/* SRB left pending by qla2x00_handle_sense(), if any. */
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			DEBUG2(printk("%s(): Cmd already returned back to OS "
			    "sp=%p.\n", __func__, sp));
			qla_printk(KERN_INFO, ha,
			    "cmd is NULL: already returned to OS (sp=%p)\n",
			    sp);

			rsp->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data.
*/ if (IS_FWI2_CAPABLE(ha)) host_to_fcp_swap(pkt->data, sizeof(pkt->data)); memcpy(sp->request_sense_ptr, pkt->data, sense_sz); DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); sp->request_sense_ptr += sense_sz; sp->request_sense_length -= sense_sz; /* Place command on done queue. */ if (sp->request_sense_length == 0) { rsp->status_srb = NULL; qla2x00_sp_compl(ha, sp); } } } /** * qla2x00_error_entry() - Process an error entry. * @ha: SCSI driver HA context * @pkt: Entry pointer */ static void qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; uint32_t handle = LSW(pkt->handle); uint16_t que = MSW(pkt->handle); struct req_que *req = ha->req_q_map[que]; #if defined(QL_DEBUG_LEVEL_2) if (pkt->entry_status & RF_INV_E_ORDER) qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); else if (pkt->entry_status & RF_INV_E_COUNT) qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__); else if (pkt->entry_status & RF_INV_E_PARAM) qla_printk(KERN_ERR, ha, "%s: Invalid Entry Parameter\n", __func__); else if (pkt->entry_status & RF_INV_E_TYPE) qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__); else if (pkt->entry_status & RF_BUSY) qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__); else qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__); #endif /* Validate handle. */ if (handle < MAX_OUTSTANDING_COMMANDS) sp = req->outstanding_cmds[handle]; else sp = NULL; if (sp) { /* Free outstanding command slot. 
*/ req->outstanding_cmds[handle] = NULL; /* Bad payload or header */ if (pkt->entry_status & (RF_INV_E_ORDER | RF_INV_E_COUNT | RF_INV_E_PARAM | RF_INV_E_TYPE)) { sp->cmd->result = DID_ERROR << 16; } else if (pkt->entry_status & RF_BUSY) { sp->cmd->result = DID_BUS_BUSY << 16; } else { sp->cmd->result = DID_ERROR << 16; } qla2x00_sp_compl(ha, sp); } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", vha->host_no)); qla_printk(KERN_WARNING, ha, "Error entry - invalid handle\n"); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } } /** * qla24xx_mbx_completion() - Process mailbox command completions. * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ static void qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; wptr = (uint16_t __iomem *)&reg->mailbox1; for (cnt = 1; cnt < ha->mbx_count; cnt++) { ha->mailbox_out[cnt] = RD_REG_WORD(wptr); wptr++; } if (ha->mcp) { DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", __func__, vha->host_no, ha->mcp->mb[0])); } else { DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", __func__, vha->host_no)); } } /** * qla24xx_process_response_queue() - Process response queue entries. 
* @ha: SCSI driver HA context */ void qla24xx_process_response_queue(struct scsi_qla_host *vha, struct rsp_que *rsp) { struct sts_entry_24xx *pkt; struct qla_hw_data *ha = vha->hw; if (!vha->flags.online) return; while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (struct sts_entry_24xx *)rsp->ring_ptr; rsp->ring_index++; if (rsp->ring_index == rsp->length) { rsp->ring_index = 0; rsp->ring_ptr = rsp->ring; } else { rsp->ring_ptr++; } if (pkt->entry_status != 0) { DEBUG3(printk(KERN_INFO "scsi(%ld): Process error entry.\n", vha->host_no)); qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); continue; } switch (pkt->entry_type) { case STATUS_TYPE: qla2x00_status_entry(vha, rsp, pkt); break; case STATUS_CONT_TYPE: qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; case VP_RPT_ID_IOCB_TYPE: qla24xx_report_id_acquisition(vha, (struct vp_rpt_id_entry_24xx *)pkt); break; case LOGINOUT_PORT_IOCB_TYPE: qla24xx_logio_entry(vha, rsp->req, (struct logio_entry_24xx *)pkt); break; case TSK_MGMT_IOCB_TYPE: qla24xx_tm_iocb_entry(vha, rsp->req, (struct tsk_mgmt_entry *)pkt); break; case MARKER_TYPE: qla24xx_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt); break; case CT_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags); break; case ELS_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; default: /* Type Not Supported. 
*/ DEBUG4(printk(KERN_WARNING "scsi(%ld): Received unknown response pkt type %x " "entry status=%x.\n", vha->host_no, pkt->entry_type, pkt->entry_status)); break; } ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); } /* Adjust ring index */ if (IS_QLA82XX(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); } else WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); } static void qla2xxx_check_risc_status(scsi_qla_host_t *vha) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) return; rval = QLA_SUCCESS; WRT_REG_DWORD(&reg->iobase_addr, 0x7C00); RD_REG_DWORD(&reg->iobase_addr); WRT_REG_DWORD(&reg->iobase_window, 0x0001); for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) { WRT_REG_DWORD(&reg->iobase_window, 0x0001); udelay(10); } else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) goto next_test; WRT_REG_DWORD(&reg->iobase_window, 0x0003); for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) { WRT_REG_DWORD(&reg->iobase_window, 0x0003); udelay(10); } else rval = QLA_FUNCTION_TIMEOUT; } if (rval != QLA_SUCCESS) goto done; next_test: if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n"); done: WRT_REG_DWORD(&reg->iobase_window, 0x0000); RD_REG_DWORD(&reg->iobase_window); } /** * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: * @dev_id: SCSI driver HA context * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
*/ irqreturn_t qla24xx_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct device_reg_24xx __iomem *reg; int status; unsigned long iter; uint32_t stat; uint32_t hccr; uint16_t mb[4]; struct rsp_que *rsp; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; status = 0; if (unlikely(pci_channel_offline(ha->pdev))) return IRQ_HANDLED; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(&reg->hccr); qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " "Dumping firmware!\n", hccr); qla2xxx_check_risc_status(vha); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) break; switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla24xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox1); mb[2] = RD_REG_WORD(&reg->mailbox2); mb[3] = RD_REG_WORD(&reg->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case 0x13: case 0x14: qla24xx_process_response_queue(vha, rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " "(%d).\n", vha->host_no, stat & 0xff)); break; } WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(&reg->hccr); } spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); complete(&ha->mbx_intr_comp); } return IRQ_HANDLED; } static irqreturn_t qla24xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem 
*reg; struct scsi_qla_host *vha; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(&reg->hccr); } spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } static irqreturn_t qla25xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer\n", __func__); return IRQ_NONE; } ha = rsp->hw; /* Clear the interrupt, if enabled, for this response queue */ if (rsp->options & ~BIT_6) { reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD_RELAXED(&reg->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); return IRQ_HANDLED; } static irqreturn_t qla24xx_msix_default(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; int status; uint32_t stat; uint32_t hccr; uint16_t mb[4]; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { DEBUG(printk( "%s(): NULL response queue pointer\n", __func__)); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp24; status = 0; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; hccr = RD_REG_DWORD(&reg->hccr); qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " "Dumping firmware!\n", hccr); 
qla2xxx_check_risc_status(vha); ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) break; switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla24xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox1); mb[2] = RD_REG_WORD(&reg->mailbox2); mb[3] = RD_REG_WORD(&reg->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case 0x13: case 0x14: qla24xx_process_response_queue(vha, rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " "(%d).\n", vha->host_no, stat & 0xff)); break; } WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); } while (0); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); complete(&ha->mbx_intr_comp); } return IRQ_HANDLED; } /* Interrupt handling helpers. 
*/ struct qla_init_msix_entry { const char *name; irq_handler_t handler; }; static struct qla_init_msix_entry msix_entries[3] = { { "qla2xxx (default)", qla24xx_msix_default }, { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, }; static struct qla_init_msix_entry qla82xx_msix_entries[2] = { { "qla2xxx (default)", qla82xx_msix_default }, { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, }; static void qla24xx_disable_msix(struct qla_hw_data *ha) { int i; struct qla_msix_entry *qentry; for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; if (qentry->have_irq) free_irq(qentry->vector, qentry->rsp); } pci_disable_msix(ha->pdev); kfree(ha->msix_entries); ha->msix_entries = NULL; ha->flags.msix_enabled = 0; } static int qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { #define MIN_MSIX_COUNT 2 int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, GFP_KERNEL); if (!entries) return -ENOMEM; for (i = 0; i < ha->msix_count; i++) entries[i].entry = i; ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); if (ret) { if (ret < MIN_MSIX_COUNT) goto msix_failed; qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable support -- %d/%d\n" " Retry with %d vectors\n", ha->msix_count, ret, ret); ha->msix_count = ret; ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); if (ret) { msix_failed: qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable" " support, giving up -- %d/%d\n", ha->msix_count, ret); goto msix_out; } ha->max_rsp_queues = ha->msix_count - 1; } ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); if (!ha->msix_entries) { ret = -ENOMEM; goto msix_out; } ha->flags.msix_enabled = 1; for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; qentry->vector = entries[i].vector; qentry->entry = entries[i].entry; qentry->have_irq = 0; qentry->rsp = NULL; } /* Enable MSI-X vectors for 
the base queue */ for (i = 0; i < 2; i++) { qentry = &ha->msix_entries[i]; if (IS_QLA82XX(ha)) { ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0, qla82xx_msix_entries[i].name, rsp); } else { ret = request_irq(qentry->vector, msix_entries[i].handler, 0, msix_entries[i].name, rsp); } if (ret) { qla_printk(KERN_WARNING, ha, "MSI-X: Unable to register handler -- %x/%d.\n", qentry->vector, ret); qla24xx_disable_msix(ha); ha->mqenable = 0; goto msix_out; } qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; } /* Enable MSI-X vector for response queue update for queue 0 */ if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) ha->mqenable = 1; msix_out: kfree(entries); return ret; } int qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) { int ret; device_reg_t __iomem *reg = ha->iobase; /* If possible, enable MSI-X. */ if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha)) goto skip_msi; if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && (ha->pdev->subsystem_device == 0x7040 || ha->pdev->subsystem_device == 0x7041 || ha->pdev->subsystem_device == 0x1705)) { DEBUG2(qla_printk(KERN_WARNING, ha, "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n", ha->pdev->subsystem_vendor, ha->pdev->subsystem_device)); goto skip_msi; } if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX || !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { DEBUG2(qla_printk(KERN_WARNING, ha, "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", ha->pdev->revision, ha->fw_attributes)); goto skip_msix; } ret = qla24xx_enable_msix(ha, rsp); if (!ret) { DEBUG2(qla_printk(KERN_INFO, ha, "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, ha->fw_attributes)); goto clear_risc_ints; } qla_printk(KERN_WARNING, ha, "MSI-X: Falling back-to MSI mode -- %d.\n", ret); skip_msix: if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_QLA8001(ha)) goto skip_msi; ret = pci_enable_msi(ha->pdev); if (!ret) { 
DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n")); ha->flags.msi_enabled = 1; } else qla_printk(KERN_WARNING, ha, "MSI-X: Falling back-to INTa mode -- %d.\n", ret); skip_msi: ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); if (ret) { qla_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d already in use.\n", ha->pdev->irq); goto fail; } ha->flags.inta_enabled = 1; clear_risc_ints: /* * FIXME: Noted that 8014s were being dropped during NK testing. * Timing deltas during MSI-X/INTa transitions? */ if (IS_QLA81XX(ha) || IS_QLA82XX(ha)) goto fail; spin_lock_irq(&ha->hardware_lock); if (IS_FWI2_CAPABLE(ha)) { WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT); } else { WRT_REG_WORD(&reg->isp.semaphore, 0); WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT); WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT); } spin_unlock_irq(&ha->hardware_lock); fail: return ret; } void qla2x00_free_irqs(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; struct rsp_que *rsp = ha->rsp_q_map[0]; if (ha->flags.msix_enabled) qla24xx_disable_msix(ha); else if (ha->flags.msi_enabled) { free_irq(ha->pdev->irq, rsp); pci_disable_msi(ha->pdev); } else free_irq(ha->pdev->irq, rsp); } int qla25xx_request_irq(struct rsp_que *rsp) { struct qla_hw_data *ha = rsp->hw; struct qla_init_msix_entry *intr = &msix_entries[2]; struct qla_msix_entry *msix = rsp->msix; int ret; ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); if (ret) { qla_printk(KERN_WARNING, ha, "MSI-X: Unable to register handler -- %x/%d.\n", msix->vector, ret); return ret; } msix->have_irq = 1; msix->rsp = rsp; return ret; }
gpl-2.0
jderrick/linux-blkdev
arch/arm/mach-zynq/slcr.c
456
5971
/* * Xilinx SLCR driver * * Copyright (c) 2011-2013 Xilinx Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA * 02139, USA. */ #include <linux/io.h> #include <linux/reboot.h> #include <linux/mfd/syscon.h> #include <linux/of_address.h> #include <linux/regmap.h> #include <linux/clk/zynq.h> #include "common.h" /* register offsets */ #define SLCR_UNLOCK_OFFSET 0x8 /* SCLR unlock register */ #define SLCR_PS_RST_CTRL_OFFSET 0x200 /* PS Software Reset Control */ #define SLCR_A9_CPU_RST_CTRL_OFFSET 0x244 /* CPU Software Reset Control */ #define SLCR_REBOOT_STATUS_OFFSET 0x258 /* PS Reboot Status */ #define SLCR_PSS_IDCODE 0x530 /* PS IDCODE */ #define SLCR_L2C_RAM 0xA1C /* L2C_RAM in AR#54190 */ #define SLCR_UNLOCK_MAGIC 0xDF0D #define SLCR_A9_CPU_CLKSTOP 0x10 #define SLCR_A9_CPU_RST 0x1 #define SLCR_PSS_IDCODE_DEVICE_SHIFT 12 #define SLCR_PSS_IDCODE_DEVICE_MASK 0x1F static void __iomem *zynq_slcr_base; static struct regmap *zynq_slcr_regmap; /** * zynq_slcr_write - Write to a register in SLCR block * * @val: Value to write to the register * @offset: Register offset in SLCR block * * Return: a negative value on error, 0 on success */ static int zynq_slcr_write(u32 val, u32 offset) { return regmap_write(zynq_slcr_regmap, offset, val); } /** * zynq_slcr_read - Read a register in SLCR block * * @val: Pointer to value to be read from SLCR * @offset: Register offset in SLCR block * * Return: a negative value on error, 0 on success */ static int zynq_slcr_read(u32 *val, u32 offset) { return regmap_read(zynq_slcr_regmap, offset, val); } /** * zynq_slcr_unlock - Unlock SLCR registers * * Return: a 
negative value on error, 0 on success */ static inline int zynq_slcr_unlock(void) { zynq_slcr_write(SLCR_UNLOCK_MAGIC, SLCR_UNLOCK_OFFSET); return 0; } /** * zynq_slcr_get_device_id - Read device code id * * Return: Device code id */ u32 zynq_slcr_get_device_id(void) { u32 val; zynq_slcr_read(&val, SLCR_PSS_IDCODE); val >>= SLCR_PSS_IDCODE_DEVICE_SHIFT; val &= SLCR_PSS_IDCODE_DEVICE_MASK; return val; } /** * zynq_slcr_system_restart - Restart the entire system. * * @nb: Pointer to restart notifier block (unused) * @action: Reboot mode (unused) * @data: Restart handler private data (unused) * * Return: 0 always */ static int zynq_slcr_system_restart(struct notifier_block *nb, unsigned long action, void *data) { u32 reboot; /* * Clear 0x0F000000 bits of reboot status register to workaround * the FSBL not loading the bitstream after soft-reboot * This is a temporary solution until we know more. */ zynq_slcr_read(&reboot, SLCR_REBOOT_STATUS_OFFSET); zynq_slcr_write(reboot & 0xF0FFFFFF, SLCR_REBOOT_STATUS_OFFSET); zynq_slcr_write(1, SLCR_PS_RST_CTRL_OFFSET); return 0; } static struct notifier_block zynq_slcr_restart_nb = { .notifier_call = zynq_slcr_system_restart, .priority = 192, }; /** * zynq_slcr_cpu_start - Start cpu * @cpu: cpu number */ void zynq_slcr_cpu_start(int cpu) { u32 reg; zynq_slcr_read(&reg, SLCR_A9_CPU_RST_CTRL_OFFSET); reg &= ~(SLCR_A9_CPU_RST << cpu); zynq_slcr_write(reg, SLCR_A9_CPU_RST_CTRL_OFFSET); reg &= ~(SLCR_A9_CPU_CLKSTOP << cpu); zynq_slcr_write(reg, SLCR_A9_CPU_RST_CTRL_OFFSET); zynq_slcr_cpu_state_write(cpu, false); } /** * zynq_slcr_cpu_stop - Stop cpu * @cpu: cpu number */ void zynq_slcr_cpu_stop(int cpu) { u32 reg; zynq_slcr_read(&reg, SLCR_A9_CPU_RST_CTRL_OFFSET); reg |= (SLCR_A9_CPU_CLKSTOP | SLCR_A9_CPU_RST) << cpu; zynq_slcr_write(reg, SLCR_A9_CPU_RST_CTRL_OFFSET); } /** * zynq_slcr_cpu_state - Read/write cpu state * @cpu: cpu number * * SLCR_REBOOT_STATUS save upper 2 bits (31/30 cpu states for cpu0 and cpu1) * 0 means cpu is 
running, 1 cpu is going to die. * * Return: true if cpu is running, false if cpu is going to die */ bool zynq_slcr_cpu_state_read(int cpu) { u32 state; state = readl(zynq_slcr_base + SLCR_REBOOT_STATUS_OFFSET); state &= 1 << (31 - cpu); return !state; } /** * zynq_slcr_cpu_state - Read/write cpu state * @cpu: cpu number * @die: cpu state - true if cpu is going to die * * SLCR_REBOOT_STATUS save upper 2 bits (31/30 cpu states for cpu0 and cpu1) * 0 means cpu is running, 1 cpu is going to die. */ void zynq_slcr_cpu_state_write(int cpu, bool die) { u32 state, mask; state = readl(zynq_slcr_base + SLCR_REBOOT_STATUS_OFFSET); mask = 1 << (31 - cpu); if (die) state |= mask; else state &= ~mask; writel(state, zynq_slcr_base + SLCR_REBOOT_STATUS_OFFSET); } /** * zynq_early_slcr_init - Early slcr init function * * Return: 0 on success, negative errno otherwise. * * Called very early during boot from platform code to unlock SLCR. */ int __init zynq_early_slcr_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-slcr"); if (!np) { pr_err("%s: no slcr node found\n", __func__); BUG(); } zynq_slcr_base = of_iomap(np, 0); if (!zynq_slcr_base) { pr_err("%s: Unable to map I/O memory\n", __func__); BUG(); } np->data = (__force void *)zynq_slcr_base; zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr"); if (IS_ERR(zynq_slcr_regmap)) { pr_err("%s: failed to find zynq-slcr\n", __func__); return -ENODEV; } /* unlock the SLCR so that registers can be changed */ zynq_slcr_unlock(); /* See AR#54190 design advisory */ regmap_update_bits(zynq_slcr_regmap, SLCR_L2C_RAM, 0x70707, 0x20202); register_restart_handler(&zynq_slcr_restart_nb); pr_info("%s mapped to %p\n", np->name, zynq_slcr_base); of_node_put(np); return 0; }
gpl-2.0
tenorntex/lhbalanced
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
1224
11024
/* * (C) 2001 Dave Jones, Arjan van de ven. * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> * * Licensed under the terms of the GNU GPL License version 2. * Based upon reverse engineered information, and on Intel documentation * for chipsets ICH2-M and ICH3-M. * * Many thanks to Ducrot Bruno for finding and fixing the last * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler * for extensive testing. * * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* */ /********************************************************************* * SPEEDSTEP - DEFINITIONS * *********************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/pci.h> #include <linux/sched.h> #include "speedstep-lib.h" /* speedstep_chipset: * It is necessary to know which chipset is used. As accesses to * this device occur at various places in this module, we need a * static struct pci_dev * pointing to that device. */ static struct pci_dev *speedstep_chipset_dev; /* speedstep_processor */ static enum speedstep_processor speedstep_processor; static u32 pmbase; /* * There are only two frequency states for each processor. Values * are in kHz for the time being. */ static struct cpufreq_frequency_table speedstep_freqs[] = { {SPEEDSTEP_HIGH, 0}, {SPEEDSTEP_LOW, 0}, {0, CPUFREQ_TABLE_END}, }; #define dprintk(msg...) 
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \ "speedstep-ich", msg) /** * speedstep_find_register - read the PMBASE address * * Returns: -ENODEV if no register could be found */ static int speedstep_find_register(void) { if (!speedstep_chipset_dev) return -ENODEV; /* get PMBASE */ pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); if (!(pmbase & 0x01)) { printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); return -ENODEV; } pmbase &= 0xFFFFFFFE; if (!pmbase) { printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); return -ENODEV; } dprintk("pmbase is 0x%x\n", pmbase); return 0; } /** * speedstep_set_state - set the SpeedStep state * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) * * Tries to change the SpeedStep state. Can be called from * smp_call_function_single. */ static void speedstep_set_state(unsigned int state) { u8 pm2_blk; u8 value; unsigned long flags; if (state > 0x1) return; /* Disable IRQs */ local_irq_save(flags); /* read state */ value = inb(pmbase + 0x50); dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); /* write new state */ value &= 0xFE; value |= state; dprintk("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); /* Disable bus master arbitration */ pm2_blk = inb(pmbase + 0x20); pm2_blk |= 0x01; outb(pm2_blk, (pmbase + 0x20)); /* Actual transition */ outb(value, (pmbase + 0x50)); /* Restore bus master arbitration */ pm2_blk &= 0xfe; outb(pm2_blk, (pmbase + 0x20)); /* check if transition was successful */ value = inb(pmbase + 0x50); /* Enable IRQs */ local_irq_restore(flags); dprintk("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); if (state == (value & 0x1)) dprintk("change to %u MHz succeeded\n", speedstep_get_frequency(speedstep_processor) / 1000); else printk(KERN_ERR "cpufreq: change failed - I/O error\n"); return; } /* Wrapper for smp_call_function_single. 
*/ static void _speedstep_set_state(void *_state) { speedstep_set_state(*(unsigned int *)_state); } /** * speedstep_activate - activate SpeedStep control in the chipset * * Tries to activate the SpeedStep status and control registers. * Returns -EINVAL on an unsupported chipset, and zero on success. */ static int speedstep_activate(void) { u16 value = 0; if (!speedstep_chipset_dev) return -EINVAL; pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); if (!(value & 0x08)) { value |= 0x08; dprintk("activating SpeedStep (TM) registers\n"); pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); } return 0; } /** * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic * * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to * the LPC bridge / PM module which contains all power-management * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected * chipset, or zero on failure. */ static unsigned int speedstep_detect_chipset(void) { speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) return 4; /* 4-M */ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) return 3; /* 3-M */ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, PCI_ANY_ID, PCI_ANY_ID, NULL); if (speedstep_chipset_dev) { /* speedstep.c causes lockups on Dell Inspirons 8000 and * 8100 which use a pretty old revision of the 82815 * host brige. Abort on these systems. 
*/ static struct pci_dev *hostbridge; hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_MC, PCI_ANY_ID, PCI_ANY_ID, NULL); if (!hostbridge) return 2; /* 2-M */ if (hostbridge->revision < 5) { dprintk("hostbridge does not support speedstep\n"); speedstep_chipset_dev = NULL; pci_dev_put(hostbridge); return 0; } pci_dev_put(hostbridge); return 2; /* 2-M */ } return 0; } static void get_freq_data(void *_speed) { unsigned int *speed = _speed; *speed = speedstep_get_frequency(speedstep_processor); } static unsigned int speedstep_get(unsigned int cpu) { unsigned int speed; /* You're supposed to ensure CPU is online. */ if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) BUG(); dprintk("detected %u kHz as current frequency\n", speed); return speed; } /** * speedstep_target - set a new CPUFreq policy * @policy: new policy * @target_freq: the target frequency * @relation: how that frequency relates to achieved frequency * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) * * Sets a new CPUFreq policy. 
*/ static int speedstep_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int newstate = 0, policy_cpu; struct cpufreq_freqs freqs; int i; if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate)) return -EINVAL; policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); freqs.old = speedstep_get(policy_cpu); freqs.new = speedstep_freqs[newstate].frequency; freqs.cpu = policy->cpu; dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); /* no transition necessary */ if (freqs.old == freqs.new) return 0; for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); } smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, true); for_each_cpu(i, policy->cpus) { freqs.cpu = i; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); } return 0; } /** * speedstep_verify - verifies a new CPUFreq policy * @policy: new policy * * Limit must be within speedstep_low_freq and speedstep_high_freq, with * at least one border included. 
*/ static int speedstep_verify(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); } struct get_freqs { struct cpufreq_policy *policy; int ret; }; static void get_freqs_on_cpu(void *_get_freqs) { struct get_freqs *get_freqs = _get_freqs; get_freqs->ret = speedstep_get_freqs(speedstep_processor, &speedstep_freqs[SPEEDSTEP_LOW].frequency, &speedstep_freqs[SPEEDSTEP_HIGH].frequency, &get_freqs->policy->cpuinfo.transition_latency, &speedstep_set_state); } static int speedstep_cpu_init(struct cpufreq_policy *policy) { int result; unsigned int policy_cpu, speed; struct get_freqs gf; /* only run on CPU to be set, or on its sibling */ #ifdef CONFIG_SMP cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); #endif policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); /* detect low and high frequency and transition latency */ gf.policy = policy; smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1); if (gf.ret) return gf.ret; /* get current speed setting */ speed = speedstep_get(policy_cpu); if (!speed) return -EIO; dprintk("currently at %s speed setting - %i MHz\n", (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? 
"low" : "high", (speed / 1000)); /* cpuinfo and default policy values */ policy->cur = speed; result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); if (result) return result; cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); return 0; } static int speedstep_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_put_attr(policy->cpu); return 0; } static struct freq_attr *speedstep_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver speedstep_driver = { .name = "speedstep-ich", .verify = speedstep_verify, .target = speedstep_target, .init = speedstep_cpu_init, .exit = speedstep_cpu_exit, .get = speedstep_get, .owner = THIS_MODULE, .attr = speedstep_attr, }; /** * speedstep_init - initializes the SpeedStep CPUFreq driver * * Initializes the SpeedStep support. Returns -ENODEV on unsupported * devices, -EINVAL on problems during initiatization, and zero on * success. */ static int __init speedstep_init(void) { /* detect processor */ speedstep_processor = speedstep_detect_processor(); if (!speedstep_processor) { dprintk("Intel(R) SpeedStep(TM) capable processor " "not found\n"); return -ENODEV; } /* detect chipset */ if (!speedstep_detect_chipset()) { dprintk("Intel(R) SpeedStep(TM) for this chipset not " "(yet) available.\n"); return -ENODEV; } /* activate speedstep support */ if (speedstep_activate()) { pci_dev_put(speedstep_chipset_dev); return -EINVAL; } if (speedstep_find_register()) return -ENODEV; return cpufreq_register_driver(&speedstep_driver); } /** * speedstep_exit - unregisters SpeedStep support * * Unregisters SpeedStep support. 
*/ static void __exit speedstep_exit(void) { pci_dev_put(speedstep_chipset_dev); cpufreq_unregister_driver(&speedstep_driver); } MODULE_AUTHOR("Dave Jones <davej@redhat.com>, " "Dominik Brodowski <linux@brodo.de>"); MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets " "with ICH-M southbridges."); MODULE_LICENSE("GPL"); module_init(speedstep_init); module_exit(speedstep_exit);
gpl-2.0
lambchops468/omap443x-overclock-lge-p769
net/bridge/br_device.c
1736
8512
/* * Device handling code * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/netpoll.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/list.h> #include <linux/netfilter_bridge.h> #include <asm/uaccess.h> #include "br_private.h" /* net device transmit always called with BH disabled */ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); const unsigned char *dest = skb->data; struct net_bridge_fdb_entry *dst; struct net_bridge_mdb_entry *mdst; struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { br_nf_pre_routing_finish_bridge_slow(skb); return NETDEV_TX_OK; } #endif BR_INPUT_SKB_CB(skb)->brdev = dev; skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); u64_stats_update_begin(&brstats->syncp); brstats->tx_packets++; /* Exclude ETH_HLEN from byte stats for consistency with Rx chain */ brstats->tx_bytes += skb->len; u64_stats_update_end(&brstats->syncp); rcu_read_lock(); if (is_broadcast_ether_addr(dest)) br_flood_deliver(br, skb); else if (is_multicast_ether_addr(dest)) { if (unlikely(netpoll_tx_running(dev))) { br_flood_deliver(br, skb); goto out; } if (br_multicast_rcv(br, NULL, skb)) { kfree_skb(skb); goto out; } mdst = br_mdb_get(br, skb); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) br_multicast_deliver(mdst, skb); else br_flood_deliver(br, skb); } else if ((dst = __br_fdb_get(br, dest)) != NULL) br_deliver(dst->dst, skb); else br_flood_deliver(br, skb); out: rcu_read_unlock(); return NETDEV_TX_OK; } static int br_dev_init(struct 
net_device *dev) { struct net_bridge *br = netdev_priv(dev); br->stats = alloc_percpu(struct br_cpu_netstats); if (!br->stats) return -ENOMEM; return 0; } static int br_dev_open(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); netdev_update_features(dev); netif_start_queue(dev); br_stp_enable_bridge(br); br_multicast_open(br); return 0; } static void br_dev_set_multicast_list(struct net_device *dev) { } static int br_dev_stop(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); br_stp_disable_bridge(br); br_multicast_stop(br); netif_stop_queue(dev); return 0; } static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct net_bridge *br = netdev_priv(dev); struct br_cpu_netstats tmp, sum = { 0 }; unsigned int cpu; for_each_possible_cpu(cpu) { unsigned int start; const struct br_cpu_netstats *bstats = per_cpu_ptr(br->stats, cpu); do { start = u64_stats_fetch_begin(&bstats->syncp); memcpy(&tmp, bstats, sizeof(tmp)); } while (u64_stats_fetch_retry(&bstats->syncp, start)); sum.tx_bytes += tmp.tx_bytes; sum.tx_packets += tmp.tx_packets; sum.rx_bytes += tmp.rx_bytes; sum.rx_packets += tmp.rx_packets; } stats->tx_bytes = sum.tx_bytes; stats->tx_packets = sum.tx_packets; stats->rx_bytes = sum.rx_bytes; stats->rx_packets = sum.rx_packets; return stats; } static int br_change_mtu(struct net_device *dev, int new_mtu) { struct net_bridge *br = netdev_priv(dev); if (new_mtu < 68 || new_mtu > br_min_mtu(br)) return -EINVAL; dev->mtu = new_mtu; #ifdef CONFIG_BRIDGE_NETFILTER /* remember the MTU in the rtable for PMTU */ dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu); #endif return 0; } /* Allow setting mac address to any valid ethernet address. 
*/ static int br_set_mac_address(struct net_device *dev, void *p) { struct net_bridge *br = netdev_priv(dev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; spin_lock_bh(&br->lock); memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); br_stp_change_bridge_id(br, addr->sa_data); br->flags |= BR_SET_MAC_ADDR; spin_unlock_bh(&br->lock); return 0; } static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "bridge"); strcpy(info->version, BR_VERSION); strcpy(info->fw_version, "N/A"); strcpy(info->bus_info, "N/A"); } static u32 br_fix_features(struct net_device *dev, u32 features) { struct net_bridge *br = netdev_priv(dev); return br_features_recompute(br, features); } #ifdef CONFIG_NET_POLL_CONTROLLER static void br_poll_controller(struct net_device *br_dev) { } static void br_netpoll_cleanup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; list_for_each_entry_safe(p, n, &br->port_list, list) { br_netpoll_disable(p); } } static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; int err = 0; list_for_each_entry_safe(p, n, &br->port_list, list) { if (!p->dev) continue; err = br_netpoll_enable(p); if (err) goto fail; } out: return err; fail: br_netpoll_cleanup(dev); goto out; } int br_netpoll_enable(struct net_bridge_port *p) { struct netpoll *np; int err = 0; np = kzalloc(sizeof(*p->np), GFP_KERNEL); err = -ENOMEM; if (!np) goto out; np->dev = p->dev; strlcpy(np->dev_name, p->dev->name, IFNAMSIZ); err = __netpoll_setup(np); if (err) { kfree(np); goto out; } p->np = np; out: return err; } void br_netpoll_disable(struct net_bridge_port *p) { struct netpoll *np = p->np; if (!np) return; p->np = NULL; /* Wait for transmitting packets to finish before freeing. 
*/ synchronize_rcu_bh(); __netpoll_cleanup(np); kfree(np); } #endif static int br_add_slave(struct net_device *dev, struct net_device *slave_dev) { struct net_bridge *br = netdev_priv(dev); return br_add_if(br, slave_dev); } static int br_del_slave(struct net_device *dev, struct net_device *slave_dev) { struct net_bridge *br = netdev_priv(dev); return br_del_if(br, slave_dev); } static const struct ethtool_ops br_ethtool_ops = { .get_drvinfo = br_getinfo, .get_link = ethtool_op_get_link, }; static const struct net_device_ops br_netdev_ops = { .ndo_open = br_dev_open, .ndo_stop = br_dev_stop, .ndo_init = br_dev_init, .ndo_start_xmit = br_dev_xmit, .ndo_get_stats64 = br_get_stats64, .ndo_set_mac_address = br_set_mac_address, .ndo_set_multicast_list = br_dev_set_multicast_list, .ndo_change_mtu = br_change_mtu, .ndo_do_ioctl = br_dev_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = br_netpoll_setup, .ndo_netpoll_cleanup = br_netpoll_cleanup, .ndo_poll_controller = br_poll_controller, #endif .ndo_add_slave = br_add_slave, .ndo_del_slave = br_del_slave, .ndo_fix_features = br_fix_features, }; static void br_dev_free(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); free_percpu(br->stats); free_netdev(dev); } static struct device_type br_type = { .name = "bridge", }; void br_dev_setup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); random_ether_addr(dev->dev_addr); ether_setup(dev); dev->netdev_ops = &br_netdev_ops; dev->destructor = br_dev_free; SET_ETHTOOL_OPS(dev, &br_ethtool_ops); SET_NETDEV_DEVTYPE(dev, &br_type); dev->tx_queue_len = 0; dev->priv_flags = IFF_EBRIDGE; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_HW_VLAN_TX; br->dev = dev; spin_lock_init(&br->lock); 
INIT_LIST_HEAD(&br->port_list); spin_lock_init(&br->hash_lock); br->bridge_id.prio[0] = 0x80; br->bridge_id.prio[1] = 0x00; memcpy(br->group_addr, br_group_address, ETH_ALEN); br->stp_enabled = BR_NO_STP; br->designated_root = br->bridge_id; br->bridge_max_age = br->max_age = 20 * HZ; br->bridge_hello_time = br->hello_time = 2 * HZ; br->bridge_forward_delay = br->forward_delay = 15 * HZ; br->ageing_time = 300 * HZ; br_netfilter_rtable_init(br); br_stp_timer_init(br); br_multicast_init(br); }
gpl-2.0
SemonCat/texj-kernel-samsung-s6-g9250
sound/isa/es1688/es1688.c
2248
10359
/* * Driver for generic ESS AudioDrive ESx688 soundcards * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/isapnp.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/module.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/es1688.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> #define CRD_NAME "Generic ESS ES1688/ES688 AudioDrive" #define DEV_NAME "es1688" MODULE_DESCRIPTION(CRD_NAME); MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESS,ES688 PnP AudioDrive,pnp:ESS0100}," "{ESS,ES1688 PnP AudioDrive,pnp:ESS0102}," "{ESS,ES688 AudioDrive,pnp:ESS6881}," "{ESS,ES1688 AudioDrive,pnp:ESS1681}}"); MODULE_ALIAS("snd_es968"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; #endif static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260 */ static 
long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* Usually 0x388 */ static long mpu_port[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -1}; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */ static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3 */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); #ifdef CONFIG_PNP module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard."); #endif MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver."); module_param_array(mpu_port, long, NULL, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for " CRD_NAME " driver."); module_param_array(irq, int, NULL, 0444); module_param_array(fm_port, long, NULL, 0444); MODULE_PARM_DESC(fm_port, "FM port # for ES1688 driver."); MODULE_PARM_DESC(irq, "IRQ # for " CRD_NAME " driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for " CRD_NAME " driver."); module_param_array(dma8, int, NULL, 0444); MODULE_PARM_DESC(dma8, "8-bit DMA # for " CRD_NAME " driver."); #ifdef CONFIG_PNP #define is_isapnp_selected(dev) isapnp[dev] #else #define is_isapnp_selected(dev) 0 #endif static int snd_es1688_match(struct device *dev, unsigned int n) { return enable[n] && !is_isapnp_selected(n); } static int snd_es1688_legacy_create(struct snd_card *card, struct device *dev, unsigned int n) { struct snd_es1688 *chip = card->private_data; static long possible_ports[] = {0x220, 0x240, 0x260}; static int possible_irqs[] = {5, 9, 10, 7, -1}; static int possible_dmas[] = {1, 3, 0, -1}; int i, error; if (irq[n] == SNDRV_AUTO_IRQ) { 
irq[n] = snd_legacy_find_free_irq(possible_irqs); if (irq[n] < 0) { dev_err(dev, "unable to find a free IRQ\n"); return -EBUSY; } } if (dma8[n] == SNDRV_AUTO_DMA) { dma8[n] = snd_legacy_find_free_dma(possible_dmas); if (dma8[n] < 0) { dev_err(dev, "unable to find a free DMA\n"); return -EBUSY; } } if (port[n] != SNDRV_AUTO_PORT) return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); i = 0; do { port[n] = possible_ports[i]; error = snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); } while (error < 0 && ++i < ARRAY_SIZE(possible_ports)); return error; } static int snd_es1688_probe(struct snd_card *card, unsigned int n) { struct snd_es1688 *chip = card->private_data; struct snd_opl3 *opl3; struct snd_pcm *pcm; int error; error = snd_es1688_pcm(card, chip, 0, &pcm); if (error < 0) return error; error = snd_es1688_mixer(card, chip); if (error < 0) return error; strlcpy(card->driver, "ES1688", sizeof(card->driver)); strlcpy(card->shortname, pcm->name, sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx, irq %i, dma %i", pcm->name, chip->port, chip->irq, chip->dma8); if (fm_port[n] == SNDRV_AUTO_PORT) fm_port[n] = port[n]; /* share the same port */ if (fm_port[n] > 0) { if (snd_opl3_create(card, fm_port[n], fm_port[n] + 2, OPL3_HW_OPL3, 0, &opl3) < 0) dev_warn(card->dev, "opl3 not detected at 0x%lx\n", fm_port[n]); else { error = snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (error < 0) return error; } } if (mpu_irq[n] >= 0 && mpu_irq[n] != SNDRV_AUTO_IRQ && chip->mpu_port > 0) { error = snd_mpu401_uart_new(card, 0, MPU401_HW_ES1688, chip->mpu_port, 0, mpu_irq[n], NULL); if (error < 0) return error; } return snd_card_register(card); } static int snd_es1688_isa_probe(struct device *dev, unsigned int n) { struct snd_card *card; int error; error = snd_card_create(index[n], id[n], THIS_MODULE, sizeof(struct snd_es1688), &card); if (error < 0) return 
error; error = snd_es1688_legacy_create(card, dev, n); if (error < 0) goto out; snd_card_set_dev(card, dev); error = snd_es1688_probe(card, n); if (error < 0) goto out; dev_set_drvdata(dev, card); return 0; out: snd_card_free(card); return error; } static int snd_es1688_isa_remove(struct device *dev, unsigned int n) { snd_card_free(dev_get_drvdata(dev)); dev_set_drvdata(dev, NULL); return 0; } static struct isa_driver snd_es1688_driver = { .match = snd_es1688_match, .probe = snd_es1688_isa_probe, .remove = snd_es1688_isa_remove, #if 0 /* FIXME */ .suspend = snd_es1688_suspend, .resume = snd_es1688_resume, #endif .driver = { .name = DEV_NAME } }; static int snd_es968_pnp_is_probed; #ifdef CONFIG_PNP static int snd_card_es968_pnp(struct snd_card *card, unsigned int n, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { struct snd_es1688 *chip = card->private_data; struct pnp_dev *pdev; int error; pdev = pnp_request_card_device(pcard, pid->devs[0].id, NULL); if (pdev == NULL) return -ENODEV; error = pnp_activate_dev(pdev); if (error < 0) { snd_printk(KERN_ERR "ES968 pnp configure failure\n"); return error; } port[n] = pnp_port_start(pdev, 0); dma8[n] = pnp_dma(pdev, 0); irq[n] = pnp_irq(pdev, 0); return snd_es1688_create(card, chip, port[n], mpu_port[n], irq[n], mpu_irq[n], dma8[n], ES1688_HW_AUTO); } static int snd_es968_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { struct snd_card *card; static unsigned int dev; int error; struct snd_es1688 *chip; if (snd_es968_pnp_is_probed) return -EBUSY; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev == SNDRV_CARDS) return -ENODEV; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_es1688), &card); if (error < 0) return error; chip = card->private_data; error = snd_card_es968_pnp(card, dev, pcard, pid); if (error < 0) { snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); error = 
snd_es1688_probe(card, dev); if (error < 0) return error; pnp_set_card_drvdata(pcard, card); snd_es968_pnp_is_probed = 1; return 0; } static void snd_es968_pnp_remove(struct pnp_card_link *pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); snd_es968_pnp_is_probed = 0; } #ifdef CONFIG_PM static int snd_es968_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_es1688 *chip = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); return 0; } static int snd_es968_pnp_resume(struct pnp_card_link *pcard) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_es1688 *chip = card->private_data; snd_es1688_reset(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pnp_card_device_id snd_es968_pnpids[] = { { .id = "ESS0968", .devs = { { "@@@0968" }, } }, { .id = "ESS0968", .devs = { { "ESS0968" }, } }, { .id = "", } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_es968_pnpids); static struct pnp_card_driver es968_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = DEV_NAME " PnP", .id_table = snd_es968_pnpids, .probe = snd_es968_pnp_detect, .remove = snd_es968_pnp_remove, #ifdef CONFIG_PM .suspend = snd_es968_pnp_suspend, .resume = snd_es968_pnp_resume, #endif }; #endif static int __init alsa_card_es1688_init(void) { #ifdef CONFIG_PNP pnp_register_card_driver(&es968_pnpc_driver); if (snd_es968_pnp_is_probed) return 0; pnp_unregister_card_driver(&es968_pnpc_driver); #endif return isa_register_driver(&snd_es1688_driver, SNDRV_CARDS); } static void __exit alsa_card_es1688_exit(void) { if (!snd_es968_pnp_is_probed) { isa_unregister_driver(&snd_es1688_driver); return; } #ifdef CONFIG_PNP pnp_unregister_card_driver(&es968_pnpc_driver); #endif } module_init(alsa_card_es1688_init); module_exit(alsa_card_es1688_exit);
gpl-2.0
KuronekoDungeon/android_kernel_sony_msm
sound/soc/codecs/ac97.c
2248
3500
/* * ac97.c -- ALSA Soc AC97 codec support * * Copyright 2005 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Generic AC97 support. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <sound/soc.h> static int ac97_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; int reg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? AC97_PCM_FRONT_DAC_RATE : AC97_PCM_LR_ADC_RATE; return snd_ac97_set_rate(codec->ac97, reg, substream->runtime->rate); } #define STD_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 |\ SNDRV_PCM_RATE_48000) static const struct snd_soc_dai_ops ac97_dai_ops = { .prepare = ac97_prepare, }; static struct snd_soc_dai_driver ac97_dai = { .name = "ac97-hifi", .ac97_control = 1, .playback = { .stream_name = "AC97 Playback", .channels_min = 1, .channels_max = 2, .rates = STD_AC97_RATES, .formats = SND_SOC_STD_AC97_FMTS,}, .capture = { .stream_name = "AC97 Capture", .channels_min = 1, .channels_max = 2, .rates = STD_AC97_RATES, .formats = SND_SOC_STD_AC97_FMTS,}, .ops = &ac97_dai_ops, }; static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg) { return soc_ac97_ops.read(codec->ac97, reg); } static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int val) { soc_ac97_ops.write(codec->ac97, reg, val); return 0; } static int ac97_soc_probe(struct snd_soc_codec *codec) { struct snd_ac97_bus *ac97_bus; struct snd_ac97_template ac97_template; 
int ret; /* add codec as bus device for standard ac97 */ ret = snd_ac97_bus(codec->card->snd_card, 0, &soc_ac97_ops, NULL, &ac97_bus); if (ret < 0) return ret; memset(&ac97_template, 0, sizeof(struct snd_ac97_template)); ret = snd_ac97_mixer(ac97_bus, &ac97_template, &codec->ac97); if (ret < 0) return ret; return 0; } #ifdef CONFIG_PM static int ac97_soc_suspend(struct snd_soc_codec *codec) { snd_ac97_suspend(codec->ac97); return 0; } static int ac97_soc_resume(struct snd_soc_codec *codec) { snd_ac97_resume(codec->ac97); return 0; } #else #define ac97_soc_suspend NULL #define ac97_soc_resume NULL #endif static struct snd_soc_codec_driver soc_codec_dev_ac97 = { .write = ac97_write, .read = ac97_read, .probe = ac97_soc_probe, .suspend = ac97_soc_suspend, .resume = ac97_soc_resume, }; static int ac97_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_ac97, &ac97_dai, 1); } static int ac97_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver ac97_codec_driver = { .driver = { .name = "ac97-codec", .owner = THIS_MODULE, }, .probe = ac97_probe, .remove = ac97_remove, }; module_platform_driver(ac97_codec_driver); MODULE_DESCRIPTION("Soc Generic AC97 driver"); MODULE_AUTHOR("Liam Girdwood"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ac97-codec");
gpl-2.0
honeyx/S3mini_golden_kernel
fs/lockd/svcproc.c
2504
14949
/* * linux/fs/lockd/svcproc.c * * Lockd server procedures. We don't implement the NLM_*_RES * procedures because we don't use the async procedures. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/time.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> #define NLMDBG_FACILITY NLMDBG_CLIENT #ifdef CONFIG_LOCKD_V4 static __be32 cast_to_nlm(__be32 status, u32 vers) { /* Note: status is assumed to be in network byte order !!! */ if (vers != 4){ switch (status) { case nlm_granted: case nlm_lck_denied: case nlm_lck_denied_nolocks: case nlm_lck_blocked: case nlm_lck_denied_grace_period: case nlm_drop_reply: break; case nlm4_deadlock: status = nlm_lck_denied; break; default: status = nlm_lck_denied_nolocks; } } return (status); } #define cast_status(status) (cast_to_nlm(status, rqstp->rq_vers)) #else #define cast_status(status) (status) #endif /* * Obtain client and file from arguments */ static __be32 nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_host **hostp, struct nlm_file **filp) { struct nlm_host *host = NULL; struct nlm_file *file = NULL; struct nlm_lock *lock = &argp->lock; __be32 error = 0; /* nfsd callbacks must have been installed for this procedure */ if (!nlmsvc_ops) return nlm_lck_denied_nolocks; /* Obtain host handle */ if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) || (argp->monitor && nsm_monitor(host) < 0)) goto no_locks; *hostp = host; /* Obtain file pointer. Not used by FREE_ALL call. 
*/ if (filp != NULL) { error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh)); if (error != 0) goto no_locks; *filp = file; /* Set up the missing parts of the file_lock structure */ lock->fl.fl_file = file->f_file; lock->fl.fl_owner = (fl_owner_t) host; lock->fl.fl_lmops = &nlmsvc_lock_operations; } return 0; no_locks: nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; } /* * NULL: Test for presence of service */ static __be32 nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) { dprintk("lockd: NULL called\n"); return rpc_success; } /* * TEST: Check for conflicting lock */ static __be32 nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: TEST called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now check for conflicting locks */ resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: TEST status %d vers %d\n", ntohl(resp->status), rqstp->rq_vers); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: LOCK called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; #if 0 /* If supplied state doesn't match current state, we assume it's * an old request that time-warped somehow. Any error return would * do in this case because it's irrelevant anyway. 
* * NB: We don't retrieve the remote host's state yet. */ if (host->h_nsmstate && host->h_nsmstate != argp->state) { resp->status = nlm_lck_denied_nolocks; } else #endif /* Now try to lock the file */ resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock, argp->block, &argp->cookie, argp->reclaim)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: CANCEL called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Try to cancel request. */ resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock)); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNLOCK: release a lock */ static __be32 nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNLOCK called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to remove the lock */ resp->status = cast_status(nlmsvc_unlock(file, &argp->lock)); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * GRANTED: A server calls us to tell that a process' lock request * was granted */ static __be32 nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } /* * This is the generic lockd callback for async RPC calls */ static void nlmsvc_callback_exit(struct rpc_task *task, void *data) { dprintk("lockd: %5u callback returned %d\n", task->tk_pid, -task->tk_status); } void nlmsvc_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; nlmsvc_release_host(call->a_host); kfree(call); } static void nlmsvc_callback_release(void *data) { nlmsvc_release_call(data); } static const struct rpc_call_ops nlmsvc_callback_ops = { .rpc_call_done = nlmsvc_callback_exit, .rpc_release = nlmsvc_callback_release, }; /* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. 
*/ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp, __be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *)) { struct nlm_host *host; struct nlm_rqst *call; __be32 stat; host = nlmsvc_lookup_host(rqstp, argp->lock.caller, argp->lock.len); if (host == NULL) return rpc_system_err; call = nlm_alloc_call(host); if (call == NULL) return rpc_system_err; stat = func(rqstp, argp, &call->a_res); if (stat != 0) { nlmsvc_release_call(call); return stat; } call->a_flags = RPC_TASK_ASYNC; if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0) return rpc_system_err; return rpc_success; } static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: TEST_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test); } static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: LOCK_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock); } static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: CANCEL_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel); } static __be32 nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: UNLOCK_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock); } static __be32 nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: GRANTED_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted); } /* * SHARE: create a DOS share or alter existing share. 
*/ static __be32 nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: SHARE called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace() && !argp->reclaim) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to create the share */ resp->status = cast_status(nlmsvc_share_file(host, file, argp)); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNSHARE: Release a DOS share. */ static __be32 nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNSHARE called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to unshare the file */ resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * NM_LOCK: Create an unmonitored lock */ static __be32 nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { dprintk("lockd: NM_LOCK called\n"); argp->monitor = 0; /* just clean the monitor flag */ return nlmsvc_proc_lock(rqstp, argp, resp); } /* * FREE_ALL: Release all locks and shares held by client */ static __be32 nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { struct nlm_host *host; /* Obtain client */ if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL)) return rpc_success; nlmsvc_free_host_resources(host); nlmsvc_release_host(host); return rpc_success; } /* * SM_NOTIFY: private callback from statd (not part of official NLM proto) */ static __be32 nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, void *resp) { dprintk("lockd: SM_NOTIFY called\n"); if (!nlm_privileged_requester(rqstp)) { char buf[RPC_MAX_ADDRBUFLEN]; printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); return rpc_system_err; } nlm_host_rebooted(argp); return rpc_success; } /* * client sent a GRANTED_RES, let's remove the associated block */ static __be32 nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, void *resp) { if (!nlmsvc_ops) return rpc_success; dprintk("lockd: GRANTED_RES called\n"); nlmsvc_grant_reply(&argp->cookie, argp->status); return rpc_success; } /* * NLM Server procedures. 
*/ #define nlmsvc_encode_norep nlmsvc_encode_void #define nlmsvc_decode_norep nlmsvc_decode_void #define nlmsvc_decode_testres nlmsvc_decode_void #define nlmsvc_decode_lockres nlmsvc_decode_void #define nlmsvc_decode_unlockres nlmsvc_decode_void #define nlmsvc_decode_cancelres nlmsvc_decode_void #define nlmsvc_decode_grantedres nlmsvc_decode_void #define nlmsvc_proc_none nlmsvc_proc_null #define nlmsvc_proc_test_res nlmsvc_proc_null #define nlmsvc_proc_lock_res nlmsvc_proc_null #define nlmsvc_proc_cancel_res nlmsvc_proc_null #define nlmsvc_proc_unlock_res nlmsvc_proc_null struct nlm_void { int dummy; }; #define PROC(name, xargt, xrest, argt, rest, respsize) \ { .pc_func = (svc_procfunc) nlmsvc_proc_##name, \ .pc_decode = (kxdrproc_t) nlmsvc_decode_##xargt, \ .pc_encode = (kxdrproc_t) nlmsvc_encode_##xrest, \ .pc_release = NULL, \ .pc_argsize = sizeof(struct nlm_##argt), \ .pc_ressize = sizeof(struct nlm_##rest), \ .pc_xdrressize = respsize, \ } #define Ck (1+XDR_QUADLEN(NLM_MAXCOOKIELEN)) /* cookie */ #define St 1 /* status */ #define No (1+1024/4) /* Net Obj */ #define Rg 2 /* range - offset + size */ struct svc_procedure nlmsvc_procedures[] = { PROC(null, void, void, void, void, 1), PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg), PROC(lock, lockargs, res, args, res, Ck+St), PROC(cancel, cancargs, res, args, res, Ck+St), PROC(unlock, unlockargs, res, args, res, Ck+St), PROC(granted, testargs, res, args, res, Ck+St), PROC(test_msg, testargs, norep, args, void, 1), PROC(lock_msg, lockargs, norep, args, void, 1), PROC(cancel_msg, cancargs, norep, args, void, 1), PROC(unlock_msg, unlockargs, norep, args, void, 1), PROC(granted_msg, testargs, norep, args, void, 1), PROC(test_res, testres, norep, res, void, 1), PROC(lock_res, lockres, norep, res, void, 1), PROC(cancel_res, cancelres, norep, res, void, 1), PROC(unlock_res, unlockres, norep, res, void, 1), PROC(granted_res, res, norep, res, void, 1), /* statd callback */ PROC(sm_notify, reboot, void, reboot, 
void, 1), PROC(none, void, void, void, void, 1), PROC(none, void, void, void, void, 1), PROC(none, void, void, void, void, 1), PROC(share, shareargs, shareres, args, res, Ck+St+1), PROC(unshare, shareargs, shareres, args, res, Ck+St+1), PROC(nm_lock, lockargs, res, args, res, Ck+St), PROC(free_all, notify, void, args, void, 0), };
gpl-2.0
CyanogenMod/android_kernel_samsung_jf
fs/lockd/svcproc.c
2504
14949
/* * linux/fs/lockd/svcproc.c * * Lockd server procedures. We don't implement the NLM_*_RES * procedures because we don't use the async procedures. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/time.h> #include <linux/lockd/lockd.h> #include <linux/lockd/share.h> #define NLMDBG_FACILITY NLMDBG_CLIENT #ifdef CONFIG_LOCKD_V4 static __be32 cast_to_nlm(__be32 status, u32 vers) { /* Note: status is assumed to be in network byte order !!! */ if (vers != 4){ switch (status) { case nlm_granted: case nlm_lck_denied: case nlm_lck_denied_nolocks: case nlm_lck_blocked: case nlm_lck_denied_grace_period: case nlm_drop_reply: break; case nlm4_deadlock: status = nlm_lck_denied; break; default: status = nlm_lck_denied_nolocks; } } return (status); } #define cast_status(status) (cast_to_nlm(status, rqstp->rq_vers)) #else #define cast_status(status) (status) #endif /* * Obtain client and file from arguments */ static __be32 nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_host **hostp, struct nlm_file **filp) { struct nlm_host *host = NULL; struct nlm_file *file = NULL; struct nlm_lock *lock = &argp->lock; __be32 error = 0; /* nfsd callbacks must have been installed for this procedure */ if (!nlmsvc_ops) return nlm_lck_denied_nolocks; /* Obtain host handle */ if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len)) || (argp->monitor && nsm_monitor(host) < 0)) goto no_locks; *hostp = host; /* Obtain file pointer. Not used by FREE_ALL call. 
*/ if (filp != NULL) { error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh)); if (error != 0) goto no_locks; *filp = file; /* Set up the missing parts of the file_lock structure */ lock->fl.fl_file = file->f_file; lock->fl.fl_owner = (fl_owner_t) host; lock->fl.fl_lmops = &nlmsvc_lock_operations; } return 0; no_locks: nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; } /* * NULL: Test for presence of service */ static __be32 nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) { dprintk("lockd: NULL called\n"); return rpc_success; } /* * TEST: Check for conflicting lock */ static __be32 nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: TEST called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now check for conflicting locks */ resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: TEST status %d vers %d\n", ntohl(resp->status), rqstp->rq_vers); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; __be32 rc = rpc_success; dprintk("lockd: LOCK called\n"); resp->cookie = argp->cookie; /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; #if 0 /* If supplied state doesn't match current state, we assume it's * an old request that time-warped somehow. Any error return would * do in this case because it's irrelevant anyway. 
* * NB: We don't retrieve the remote host's state yet. */ if (host->h_nsmstate && host->h_nsmstate != argp->state) { resp->status = nlm_lck_denied_nolocks; } else #endif /* Now try to lock the file */ resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock, argp->block, &argp->cookie, argp->reclaim)); if (resp->status == nlm_drop_reply) rc = rpc_drop_reply; else dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rc; } static __be32 nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: CANCEL called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Try to cancel request. */ resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock)); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNLOCK: release a lock */ static __be32 nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNLOCK called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to remove the lock */ resp->status = cast_status(nlmsvc_unlock(file, &argp->lock)); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * GRANTED: A server calls us to tell that a process' lock request * was granted */ static __be32 nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { resp->cookie = argp->cookie; dprintk("lockd: GRANTED called\n"); resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock); dprintk("lockd: GRANTED status %d\n", ntohl(resp->status)); return rpc_success; } /* * This is the generic lockd callback for async RPC calls */ static void nlmsvc_callback_exit(struct rpc_task *task, void *data) { dprintk("lockd: %5u callback returned %d\n", task->tk_pid, -task->tk_status); } void nlmsvc_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; nlmsvc_release_host(call->a_host); kfree(call); } static void nlmsvc_callback_release(void *data) { nlmsvc_release_call(data); } static const struct rpc_call_ops nlmsvc_callback_ops = { .rpc_call_done = nlmsvc_callback_exit, .rpc_release = nlmsvc_callback_release, }; /* * `Async' versions of the above service routines. They aren't really, * because we send the callback before the reply proper. I hope this * doesn't break any clients. 
*/ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp, __be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res *)) { struct nlm_host *host; struct nlm_rqst *call; __be32 stat; host = nlmsvc_lookup_host(rqstp, argp->lock.caller, argp->lock.len); if (host == NULL) return rpc_system_err; call = nlm_alloc_call(host); if (call == NULL) return rpc_system_err; stat = func(rqstp, argp, &call->a_res); if (stat != 0) { nlmsvc_release_call(call); return stat; } call->a_flags = RPC_TASK_ASYNC; if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0) return rpc_system_err; return rpc_success; } static __be32 nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: TEST_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_TEST_RES, argp, nlmsvc_proc_test); } static __be32 nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: LOCK_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlmsvc_proc_lock); } static __be32 nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: CANCEL_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlmsvc_proc_cancel); } static __be32 nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: UNLOCK_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlmsvc_proc_unlock); } static __be32 nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { dprintk("lockd: GRANTED_MSG called\n"); return nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlmsvc_proc_granted); } /* * SHARE: create a DOS share or alter existing share. 
*/ static __be32 nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: SHARE called\n"); resp->cookie = argp->cookie; /* Don't accept new lock requests during grace period */ if (locks_in_grace() && !argp->reclaim) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success; /* Now try to create the share */ resp->status = cast_status(nlmsvc_share_file(host, file, argp)); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * UNSHARE: Release a DOS share. */ static __be32 nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { struct nlm_host *host; struct nlm_file *file; dprintk("lockd: UNSHARE called\n"); resp->cookie = argp->cookie; /* Don't accept requests during grace period */ if (locks_in_grace()) { resp->status = nlm_lck_denied_grace_period; return rpc_success; } /* Obtain client and file */ if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file))) return resp->status == nlm_drop_reply ? 
rpc_drop_reply :rpc_success; /* Now try to unshare the file */ resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } /* * NM_LOCK: Create an unmonitored lock */ static __be32 nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp, struct nlm_res *resp) { dprintk("lockd: NM_LOCK called\n"); argp->monitor = 0; /* just clean the monitor flag */ return nlmsvc_proc_lock(rqstp, argp, resp); } /* * FREE_ALL: Release all locks and shares held by client */ static __be32 nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp, void *resp) { struct nlm_host *host; /* Obtain client */ if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL)) return rpc_success; nlmsvc_free_host_resources(host); nlmsvc_release_host(host); return rpc_success; } /* * SM_NOTIFY: private callback from statd (not part of official NLM proto) */ static __be32 nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp, void *resp) { dprintk("lockd: SM_NOTIFY called\n"); if (!nlm_privileged_requester(rqstp)) { char buf[RPC_MAX_ADDRBUFLEN]; printk(KERN_WARNING "lockd: rejected NSM callback from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); return rpc_system_err; } nlm_host_rebooted(argp); return rpc_success; } /* * client sent a GRANTED_RES, let's remove the associated block */ static __be32 nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp, void *resp) { if (!nlmsvc_ops) return rpc_success; dprintk("lockd: GRANTED_RES called\n"); nlmsvc_grant_reply(&argp->cookie, argp->status); return rpc_success; } /* * NLM Server procedures. 
*/ #define nlmsvc_encode_norep nlmsvc_encode_void #define nlmsvc_decode_norep nlmsvc_decode_void #define nlmsvc_decode_testres nlmsvc_decode_void #define nlmsvc_decode_lockres nlmsvc_decode_void #define nlmsvc_decode_unlockres nlmsvc_decode_void #define nlmsvc_decode_cancelres nlmsvc_decode_void #define nlmsvc_decode_grantedres nlmsvc_decode_void #define nlmsvc_proc_none nlmsvc_proc_null #define nlmsvc_proc_test_res nlmsvc_proc_null #define nlmsvc_proc_lock_res nlmsvc_proc_null #define nlmsvc_proc_cancel_res nlmsvc_proc_null #define nlmsvc_proc_unlock_res nlmsvc_proc_null struct nlm_void { int dummy; }; #define PROC(name, xargt, xrest, argt, rest, respsize) \ { .pc_func = (svc_procfunc) nlmsvc_proc_##name, \ .pc_decode = (kxdrproc_t) nlmsvc_decode_##xargt, \ .pc_encode = (kxdrproc_t) nlmsvc_encode_##xrest, \ .pc_release = NULL, \ .pc_argsize = sizeof(struct nlm_##argt), \ .pc_ressize = sizeof(struct nlm_##rest), \ .pc_xdrressize = respsize, \ } #define Ck (1+XDR_QUADLEN(NLM_MAXCOOKIELEN)) /* cookie */ #define St 1 /* status */ #define No (1+1024/4) /* Net Obj */ #define Rg 2 /* range - offset + size */ struct svc_procedure nlmsvc_procedures[] = { PROC(null, void, void, void, void, 1), PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg), PROC(lock, lockargs, res, args, res, Ck+St), PROC(cancel, cancargs, res, args, res, Ck+St), PROC(unlock, unlockargs, res, args, res, Ck+St), PROC(granted, testargs, res, args, res, Ck+St), PROC(test_msg, testargs, norep, args, void, 1), PROC(lock_msg, lockargs, norep, args, void, 1), PROC(cancel_msg, cancargs, norep, args, void, 1), PROC(unlock_msg, unlockargs, norep, args, void, 1), PROC(granted_msg, testargs, norep, args, void, 1), PROC(test_res, testres, norep, res, void, 1), PROC(lock_res, lockres, norep, res, void, 1), PROC(cancel_res, cancelres, norep, res, void, 1), PROC(unlock_res, unlockres, norep, res, void, 1), PROC(granted_res, res, norep, res, void, 1), /* statd callback */ PROC(sm_notify, reboot, void, reboot, 
void, 1), PROC(none, void, void, void, void, 1), PROC(none, void, void, void, void, 1), PROC(none, void, void, void, void, 1), PROC(share, shareargs, shareres, args, res, Ck+St+1), PROC(unshare, shareargs, shareres, args, res, Ck+St+1), PROC(nm_lock, lockargs, res, args, res, Ck+St), PROC(free_all, notify, void, args, void, 0), };
gpl-2.0
amphorion/kernel_pyramidv2
drivers/net/tile/tilepro.c
2760
65002
/* * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/module.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/kernel.h> /* printk() */ #include <linux/slab.h> /* kmalloc() */ #include <linux/errno.h> /* error codes */ #include <linux/types.h> /* size_t */ #include <linux/interrupt.h> #include <linux/in.h> #include <linux/netdevice.h> /* struct device, and other headers */ #include <linux/etherdevice.h> /* eth_type_trans */ #include <linux/skbuff.h> #include <linux/ioctl.h> #include <linux/cdev.h> #include <linux/hugetlb.h> #include <linux/in6.h> #include <linux/timer.h> #include <linux/io.h> #include <asm/checksum.h> #include <asm/homecache.h> #include <hv/drv_xgbe_intf.h> #include <hv/drv_xgbe_impl.h> #include <hv/hypervisor.h> #include <hv/netio_intf.h> /* For TSO */ #include <linux/ip.h> #include <linux/tcp.h> /* * First, "tile_net_init_module()" initializes all four "devices" which * can be used by linux. * * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes * the network cpus, then uses "tile_net_open_aux()" to initialize * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all * the tiles, provide buffers to LIPP, allow ingress to start, and * turn on hypervisor interrupt handling (and NAPI) on all tiles. * * If registration fails due to the link being down, then "retry_work" * is used to keep calling "tile_net_open_inner()" until it succeeds. 
* * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to * stop egress, drain the LIPP buffers, unregister all the tiles, stop * LIPP/LEPP, and wipe the LEPP queue. * * We start out with the ingress interrupt enabled on each CPU. When * this interrupt fires, we disable it, and call "napi_schedule()". * This will cause "tile_net_poll()" to be called, which will pull * packets from the netio queue, filtering them out, or passing them * to "netif_receive_skb()". If our budget is exhausted, we will * return, knowing we will be called again later. Otherwise, we * reenable the ingress interrupt, and call "napi_complete()". * * HACK: Since disabling the ingress interrupt is not reliable, we * ignore the interrupt if the global "active" flag is false. * * * NOTE: The use of "native_driver" ensures that EPP exists, and that * we are using "LIPP" and "LEPP". * * NOTE: Failing to free completions for an arbitrarily long time * (which is defined to be illegal) does in fact cause bizarre * problems. The "egress_timer" helps prevent this from happening. */ /* HACK: Allow use of "jumbo" packets. */ /* This should be 1500 if "jumbo" is not set in LIPP. */ /* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */ /* ISSUE: This has not been thoroughly tested (except at 1500). */ #define TILE_NET_MTU 1500 /* HACK: Define to support GSO. */ /* ISSUE: This may actually hurt performance of the TCP blaster. */ /* #define TILE_NET_GSO */ /* Define this to collapse "duplicate" acks. */ /* #define IGNORE_DUP_ACKS */ /* HACK: Define this to verify incoming packets. */ /* #define TILE_NET_VERIFY_INGRESS */ /* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */ #define TILE_NET_TX_QUEUE_LEN 0 /* Define to dump packets (prints out the whole packet on tx and rx). */ /* #define TILE_NET_DUMP_PACKETS */ /* Define to enable debug spew (all PDEBUG's are enabled). */ /* #define TILE_NET_DEBUG */ /* Define to activate paranoia checks. 
*/ /* #define TILE_NET_PARANOIA */ /* Default transmit lockup timeout period, in jiffies. */ #define TILE_NET_TIMEOUT (5 * HZ) /* Default retry interval for bringing up the NetIO interface, in jiffies. */ #define TILE_NET_RETRY_INTERVAL (5 * HZ) /* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */ #define TILE_NET_DEVS 4 /* Paranoia. */ #if NET_IP_ALIGN != LIPP_PACKET_PADDING #error "NET_IP_ALIGN must match LIPP_PACKET_PADDING." #endif /* Debug print. */ #ifdef TILE_NET_DEBUG #define PDEBUG(fmt, args...) net_printk(fmt, ## args) #else #define PDEBUG(fmt, args...) #endif MODULE_AUTHOR("Tilera"); MODULE_LICENSE("GPL"); /* * Queue of incoming packets for a specific cpu and device. * * Includes a pointer to the "system" data, and the actual "user" data. */ struct tile_netio_queue { netio_queue_impl_t *__system_part; netio_queue_user_impl_t __user_part; }; /* * Statistics counters for a specific cpu and device. */ struct tile_net_stats_t { u32 rx_packets; u32 rx_bytes; u32 tx_packets; u32 tx_bytes; }; /* * Info for a specific cpu and device. * * ISSUE: There is a "dev" pointer in "napi" as well. */ struct tile_net_cpu { /* The NAPI struct. */ struct napi_struct napi; /* Packet queue. */ struct tile_netio_queue queue; /* Statistics. */ struct tile_net_stats_t stats; /* True iff NAPI is enabled. */ bool napi_enabled; /* True if this tile has succcessfully registered with the IPP. */ bool registered; /* True if the link was down last time we tried to register. */ bool link_down; /* True if "egress_timer" is scheduled. */ bool egress_timer_scheduled; /* Number of small sk_buffs which must still be provided. */ unsigned int num_needed_small_buffers; /* Number of large sk_buffs which must still be provided. */ unsigned int num_needed_large_buffers; /* A timer for handling egress completions. */ struct timer_list egress_timer; }; /* * Info for a specific device. */ struct tile_net_priv { /* Our network device. */ struct net_device *dev; /* Pages making up the egress queue. 
*/ struct page *eq_pages; /* Address of the actual egress queue. */ lepp_queue_t *eq; /* Protects "eq". */ spinlock_t eq_lock; /* The hypervisor handle for this interface. */ int hv_devhdl; /* The intr bit mask that IDs this device. */ u32 intr_id; /* True iff "tile_net_open_aux()" has succeeded. */ bool partly_opened; /* True iff the device is "active". */ bool active; /* Effective network cpus. */ struct cpumask network_cpus_map; /* Number of network cpus. */ int network_cpus_count; /* Credits per network cpu. */ int network_cpus_credits; /* Network stats. */ struct net_device_stats stats; /* For NetIO bringup retries. */ struct delayed_work retry_work; /* Quick access to per cpu data. */ struct tile_net_cpu *cpu[NR_CPUS]; }; /* Log2 of the number of small pages needed for the egress queue. */ #define EQ_ORDER get_order(sizeof(lepp_queue_t)) /* Size of the egress queue's pages. */ #define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER)) /* * The actual devices (xgbe0, xgbe1, gbe0, gbe1). */ static struct net_device *tile_net_devs[TILE_NET_DEVS]; /* * The "tile_net_cpu" structures for each device. */ static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0); static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1); static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0); static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1); /* * True if "network_cpus" was specified. */ static bool network_cpus_used; /* * The actual cpus in "network_cpus". */ static struct cpumask network_cpus_map; #ifdef TILE_NET_DEBUG /* * printk with extra stuff. * * We print the CPU we're running in brackets. */ static void net_printk(char *fmt, ...) { int i; int len; va_list args; static char buf[256]; len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id()); va_start(args, fmt); i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args); va_end(args); buf[255] = '\0'; pr_notice(buf); } #endif #ifdef TILE_NET_DUMP_PACKETS /* * Dump a packet. 
*/ static void dump_packet(unsigned char *data, unsigned long length, char *s) { int my_cpu = smp_processor_id(); unsigned long i; char buf[128]; static unsigned int count; pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", data, length, s, count++); pr_info("\n"); for (i = 0; i < length; i++) { if ((i & 0xf) == 0) sprintf(buf, "[%02d] %8.8lx:", my_cpu, i); sprintf(buf + strlen(buf), " %2.2x", data[i]); if ((i & 0xf) == 0xf || i == length - 1) { strcat(buf, "\n"); pr_info("%s", buf); } } } #endif /* * Provide support for the __netio_fastio1() swint * (see <hv/drv_xgbe_intf.h> for how it is used). * * The fastio swint2 call may clobber all the caller-saved registers. * It rarely clobbers memory, but we allow for the possibility in * the signature just to be on the safe side. * * Also, gcc doesn't seem to allow an input operand to be * clobbered, so we fake it with dummy outputs. * * This function can't be static because of the way it is declared * in the netio header. */ inline int __netio_fastio1(u32 fastio_index, u32 arg0) { long result, clobber_r1, clobber_r10; asm volatile("swint2" : "=R00" (result), "=R01" (clobber_r1), "=R10" (clobber_r10) : "R10" (fastio_index), "R01" (arg0) : "memory", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29"); return result; } /* * Provide a linux buffer to LIPP. */ static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, void *va, bool small) { struct tile_netio_queue *queue = &info->queue; /* Convert "va" and "small" to "linux_buffer_t". */ unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small; __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); } /* * Provide a linux buffer for LIPP. 
* * Note that the ACTUAL allocation for each buffer is a "struct sk_buff", * plus a chunk of memory that includes not only the requested bytes, but * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info". * * Note that "struct skb_shared_info" is 88 bytes with 64K pages and * 268 bytes with 4K pages (since the frags[] array needs 18 entries). * * Without jumbo packets, the maximum packet size will be 1536 bytes, * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told * the hardware to clip at 1518 bytes instead of 1536 bytes, then we * could save an entire cache line, but in practice, we don't need it. * * Since CPAs are 38 bits, and we can only encode the high 31 bits in * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must * align the actual "va" mod 128. * * We assume that the underlying "head" will be aligned mod 64. Note * that in practice, we have seen "head" NOT aligned mod 128 even when * using 2048 byte allocations, which is surprising. * * If "head" WAS always aligned mod 128, we could change LIPP to * assume that the low SIX bits are zero, and the 7th bit is one, that * is, align the actual "va" mod 128 plus 64, which would be "free". * * For now, the actual "head" pointer points at NET_SKB_PAD bytes of * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for * the actual packet, plus 62 bytes of empty padding, plus some * padding and the "struct skb_shared_info". * * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88 * bytes, or 1816 bytes, which fits comfortably into 2048 bytes. * * With 64K pages, a small buffer thus needs 32+92+4+2+126+88 * bytes, or 344 bytes, which means we are wasting 64+ bytes, and * could presumably increase the size of small buffers. * * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268 * bytes, or 1996 bytes, which fits comfortably into 2048 bytes. 
* * With 4K pages, a small buffer thus needs 32+92+4+2+126+268 * bytes, or 524 bytes, which is annoyingly wasteful. * * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192? * * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64? */ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, bool small) { #if TILE_NET_MTU <= 1536 /* Without "jumbo", 2 + 1536 should be sufficient. */ unsigned int large_size = NET_IP_ALIGN + 1536; #else /* ISSUE: This has not been tested. */ unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; #endif /* Avoid "false sharing" with last cache line. */ /* ISSUE: This is already done by "dev_alloc_skb()". */ unsigned int len = (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); unsigned int padding = 128 - NET_SKB_PAD; unsigned int align; struct sk_buff *skb; void *va; struct sk_buff **skb_ptr; /* Request 96 extra bytes for alignment purposes. */ skb = dev_alloc_skb(len + padding); if (skb == NULL) return false; /* Skip 32 or 96 bytes to align "data" mod 128. */ align = -(long)skb->data & (128 - 1); BUG_ON(align > padding); skb_reserve(skb, align); /* This address is given to IPP. */ va = skb->data; /* Buffers must not span a huge page. */ BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0); #ifdef TILE_NET_PARANOIA #if CHIP_HAS_CBOX_HOME_MAP() if (hash_default) { HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx", va, hv_pte_get_mode(pte), hv_pte_val(pte)); } #endif #endif /* Invalidate the packet buffer. */ if (!hash_default) __inv_buffer(va, len); /* Skip two bytes to satisfy LIPP assumptions. */ /* Note that this aligns IP on a 16 byte boundary. */ /* ISSUE: Do this when the packet arrives? */ skb_reserve(skb, NET_IP_ALIGN); /* Save a back-pointer to 'skb'. 
*/ skb_ptr = va - sizeof(*skb_ptr); *skb_ptr = skb; /* Make sure "skb_ptr" has been flushed. */ __insn_mf(); /* Provide the new buffer. */ tile_net_provide_linux_buffer(info, va, small); return true; } /* * Provide linux buffers for LIPP. */ static void tile_net_provide_needed_buffers(struct tile_net_cpu *info) { while (info->num_needed_small_buffers != 0) { if (!tile_net_provide_needed_buffer(info, true)) goto oops; info->num_needed_small_buffers--; } while (info->num_needed_large_buffers != 0) { if (!tile_net_provide_needed_buffer(info, false)) goto oops; info->num_needed_large_buffers--; } return; oops: /* Add a description to the page allocation failure dump. */ pr_notice("Could not provide a linux buffer to LIPP.\n"); } /* * Grab some LEPP completions, and store them in "comps", of size * "comps_size", and return the number of completions which were * stored, so the caller can free them. */ static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq, struct sk_buff *comps[], unsigned int comps_size, unsigned int min_size) { unsigned int n = 0; unsigned int comp_head = eq->comp_head; unsigned int comp_busy = eq->comp_busy; while (comp_head != comp_busy && n < comps_size) { comps[n++] = eq->comps[comp_head]; LEPP_QINC(comp_head); } if (n < min_size) return 0; eq->comp_head = comp_head; return n; } /* * Free some comps, and return true iff there are still some pending. */ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all) { struct tile_net_priv *priv = netdev_priv(dev); lepp_queue_t *eq = priv->eq; struct sk_buff *olds[64]; unsigned int wanted = 64; unsigned int i, n; bool pending; spin_lock(&priv->eq_lock); if (all) eq->comp_busy = eq->comp_tail; n = tile_net_lepp_grab_comps(eq, olds, wanted, 0); pending = (eq->comp_head != eq->comp_tail); spin_unlock(&priv->eq_lock); for (i = 0; i < n; i++) kfree_skb(olds[i]); return pending; } /* * Make sure the egress timer is scheduled. 
 *
 * Note that we use "schedule if not scheduled" logic instead of the more
 * obvious "reschedule" logic, because "reschedule" is fairly expensive.
 */
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
	if (!info->egress_timer_scheduled) {
		/* Pinned: the timer must fire on this same tile,
		 * since it touches this tile's per-cpu state. */
		mod_timer_pinned(&info->egress_timer, jiffies + 1);
		info->egress_timer_scheduled = true;
	}
}

/*
 * The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected (on behalf of any tile).
 *
 * ISSUE: Realistically, will the timer ever stop scheduling itself?
 *
 * ISSUE: This timer is almost never actually needed, so just use a global
 * timer that can run on any tile.
 *
 * ISSUE: Maybe instead track number of expected completions, and free
 * only that many, resetting to zero if "pending" is ever false.
 */
static void tile_net_handle_egress_timer(unsigned long arg)
{
	struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
	struct net_device *dev = info->napi.dev;

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	/* Free comps, and reschedule timer if more are pending. */
	if (tile_net_lepp_free_comps(dev, false))
		tile_net_schedule_egress_timer(info);
}

#ifdef IGNORE_DUP_ACKS

/*
 * Help detect "duplicate" ACKs.  These are sequential packets (for a
 * given flow) which are exactly 66 bytes long, sharing everything but
 * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
 * Tstamps=10@0x38.  The ID's are +1, the Hsum's are -1, the Ack's are
 * +N, and the Tstamps are usually identical.
 *
 * NOTE: Apparently truly duplicate acks (with identical "ack" values),
 * should not be collapsed, as they are used for some kind of flow control.
 */
static bool is_dup_ack(char *s1, char *s2, unsigned int len)
{
	int i;
	/* Bitmask of byte offsets allowed to differ; shifted down one
	 * bit per byte compared, so bit 0 always tests offset "i".
	 * Only works for offsets < 64 (fits in unsigned long long). */
	unsigned long long ignorable = 0;

	/* Identification. */
	ignorable |= (1ULL << 0x12);
	ignorable |= (1ULL << 0x13);

	/* Header checksum. */
	ignorable |= (1ULL << 0x18);
	ignorable |= (1ULL << 0x19);

	/* ACK. */
	ignorable |= (1ULL << 0x2a);
	ignorable |= (1ULL << 0x2b);
	ignorable |= (1ULL << 0x2c);
	ignorable |= (1ULL << 0x2d);

	/* WinSize. */
	ignorable |= (1ULL << 0x30);
	ignorable |= (1ULL << 0x31);

	/* Checksum. */
	ignorable |= (1ULL << 0x32);
	ignorable |= (1ULL << 0x33);

	for (i = 0; i < len; i++, ignorable >>= 1) {

		if ((ignorable & 1) || (s1[i] == s2[i]))
			continue;

#ifdef TILE_NET_DEBUG
		/* HACK: Mention non-timestamp diffs. */
		if (i < 0x38 && i != 0x2f &&
		    net_ratelimit())
			pr_info("Diff at 0x%x\n", i);
#endif

		return false;
	}

#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
	/* HACK: Do not suppress truly duplicate ACKs. */
	/* ISSUE: Is this actually necessary or helpful? */
	if (s1[0x2a] == s2[0x2a] &&
	    s1[0x2b] == s2[0x2b] &&
	    s1[0x2c] == s2[0x2c] &&
	    s1[0x2d] == s2[0x2d]) {
		return false;
	}
#endif

	return true;
}

#endif

/*
 * Drop one ingress packet: free its skb and advance the read pointer,
 * without delivering it to the network stack.
 */
static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	/* Compute the next read index, wrapping at end of queue. */
	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Convert "linux_buffer_t" to "va". */
	/* Low bit is the small/large flag; remaining bits are the
	 * physical address in 128-byte units. */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Acquire the associated "skb". */
	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
	struct sk_buff *skb = *skb_ptr;

	kfree_skb(skb);

	/* Consume this packet. */
	qup->__packet_receive_read = index2;
}

/*
 * Like "tile_net_poll()", but just discard packets.
*/ static void tile_net_discard_packets(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); int my_cpu = smp_processor_id(); struct tile_net_cpu *info = priv->cpu[my_cpu]; struct tile_netio_queue *queue = &info->queue; netio_queue_impl_t *qsp = queue->__system_part; netio_queue_user_impl_t *qup = &queue->__user_part; while (qup->__packet_receive_read != qsp->__packet_receive_queue.__packet_write) { int index = qup->__packet_receive_read; tile_net_discard_aux(info, index); } } /* * Handle the next packet. Return true if "processed", false if "filtered". */ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) { struct net_device *dev = info->napi.dev; struct tile_netio_queue *queue = &info->queue; netio_queue_impl_t *qsp = queue->__system_part; netio_queue_user_impl_t *qup = &queue->__user_part; struct tile_net_stats_t *stats = &info->stats; int filter; int index2_aux = index + sizeof(netio_pkt_t); int index2 = ((index2_aux == qsp->__packet_receive_queue.__last_packet_plus_one) ? 0 : index2_aux); netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); /* Extract the packet size. FIXME: Shouldn't the second line */ /* get subtracted? Mostly moot, since it should be "zero". */ unsigned long len = (NETIO_PKT_CUSTOM_LENGTH(pkt) + NET_IP_ALIGN - NETIO_PACKET_PADDING); /* Extract the "linux_buffer_t". */ unsigned int buffer = pkt->__packet.word; /* Extract "small" (vs "large"). */ bool small = ((buffer & 1) != 0); /* Convert "linux_buffer_t" to "va". */ void *va = __va((phys_addr_t)(buffer >> 1) << 7); /* Extract the packet data pointer. */ /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ unsigned char *buf = va + NET_IP_ALIGN; /* Invalidate the packet buffer. */ if (!hash_default) __inv_buffer(buf, len); /* ISSUE: Is this needed? 
*/ dev->last_rx = jiffies; #ifdef TILE_NET_DUMP_PACKETS dump_packet(buf, len, "rx"); #endif /* TILE_NET_DUMP_PACKETS */ #ifdef TILE_NET_VERIFY_INGRESS if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { /* Bug 6624: Includes UDP packets with a "zero" checksum. */ pr_warning("Bad L4 checksum on %d byte packet.\n", len); } if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { dump_packet(buf, len, "rx"); panic("Bad L3 checksum."); } switch (NETIO_PKT_STATUS_M(metadata, pkt)) { case NETIO_PKT_STATUS_OVERSIZE: if (len >= 64) { dump_packet(buf, len, "rx"); panic("Unexpected OVERSIZE."); } break; case NETIO_PKT_STATUS_BAD: pr_warning("Unexpected BAD %ld byte packet.\n", len); } #endif filter = 0; /* ISSUE: Filter TCP packets with "bad" checksums? */ if (!(dev->flags & IFF_UP)) { /* Filter packets received before we're up. */ filter = 1; } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) { /* Filter "truncated" packets. */ filter = 1; } else if (!(dev->flags & IFF_PROMISC)) { /* FIXME: Implement HW multicast filter. */ if (!is_multicast_ether_addr(buf)) { /* Filter packets not for our address. */ const u8 *mine = dev->dev_addr; filter = compare_ether_addr(mine, buf); } } if (filter) { /* ISSUE: Update "drop" statistics? */ tile_net_provide_linux_buffer(info, va, small); } else { /* Acquire the associated "skb". */ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); struct sk_buff *skb = *skb_ptr; /* Paranoia. */ if (skb->data != buf) panic("Corrupt linux buffer from LIPP! " "VA=%p, skb=%p, skb->data=%p\n", va, skb, skb->data); /* Encode the actual packet length. */ skb_put(skb, len); /* NOTE: This call also sets "skb->dev = dev". */ skb->protocol = eth_type_trans(skb, dev); /* Avoid recomputing "good" TCP/UDP checksums. 
*/ if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) skb->ip_summed = CHECKSUM_UNNECESSARY; netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += len; if (small) info->num_needed_small_buffers++; else info->num_needed_large_buffers++; } /* Return four credits after every fourth packet. */ if (--qup->__receive_credit_remaining == 0) { u32 interval = qup->__receive_credit_interval; qup->__receive_credit_remaining = interval; __netio_fastio_return_credits(qup->__fastio_index, interval); } /* Consume this packet. */ qup->__packet_receive_read = index2; return !filter; } /* * Handle some packets for the given device on the current CPU. * * If "tile_net_stop()" is called on some other tile while this * function is running, we will return, hopefully before that * other tile asks us to call "napi_disable()". * * The "rotting packet" race condition occurs if a packet arrives * during the extremely narrow window between the queue appearing to * be empty, and the ingress interrupt being re-enabled. This happens * a LOT under heavy network load. */ static int tile_net_poll(struct napi_struct *napi, int budget) { struct net_device *dev = napi->dev; struct tile_net_priv *priv = netdev_priv(dev); int my_cpu = smp_processor_id(); struct tile_net_cpu *info = priv->cpu[my_cpu]; struct tile_netio_queue *queue = &info->queue; netio_queue_impl_t *qsp = queue->__system_part; netio_queue_user_impl_t *qup = &queue->__user_part; unsigned int work = 0; while (priv->active) { int index = qup->__packet_receive_read; if (index == qsp->__packet_receive_queue.__packet_write) break; if (tile_net_poll_aux(info, index)) { if (++work >= budget) goto done; } } napi_complete(&info->napi); if (!priv->active) goto done; /* Re-enable the ingress interrupt. */ enable_percpu_irq(priv->intr_id); /* HACK: Avoid the "rotting packet" problem (see above). 
 */
	/* A packet may have arrived between the emptiness check and
	 * the interrupt re-enable; if so, schedule another poll. */
	if (qup->__packet_receive_read !=
	    qsp->__packet_receive_queue.__packet_write) {
		/* ISSUE: Sometimes this returns zero, presumably */
		/* because an interrupt was handled for this tile. */
		(void)napi_reschedule(&info->napi);
	}

done:

	if (priv->active)
		tile_net_provide_needed_buffers(info);

	return work;
}

/*
 * Handle an ingress interrupt for the given device on the current cpu.
 *
 * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
 * been called!  This is probably due to "pending hypervisor downcalls".
 *
 * ISSUE: Is there any race condition between the "napi_schedule()" here
 * and the "napi_complete()" call above?
 */
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Ignore unwanted interrupts. */
	if (!priv->active)
		return IRQ_HANDLED;

	/* ISSUE: Sometimes "info->napi_enabled" is false here. */

	napi_schedule(&info->napi);

	return IRQ_HANDLED;
}

/*
 * One time initialization per interface.
 *
 * Homes the EPP queue memory, registers it with the hypervisor, and
 * starts the LIPP/LEPP shim.  Returns 0 or -EIO.
 */
static int tile_net_open_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	int ret;
	int dummy;
	unsigned int epp_lotar;

	/*
	 * Find out where EPP memory should be homed.
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&epp_lotar,
			   sizeof(epp_lotar), NETIO_EPP_SHM_OFF);
	if (ret < 0) {
		pr_err("could not read epp_shm_queue lotar.\n");
		return -EIO;
	}

	/*
	 * Home the page on the EPP.
	 */
	{
		int epp_home = hv_lotar_to_cpu(epp_lotar);
		homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
	}

	/*
	 * Register the EPP shared memory queue.
	 */
	{
		netio_ipp_address_t ea = {
			.va = 0,
			.pa = __pa(priv->eq),
			.pte = hv_pte(0),
			.size = EQ_SIZE,
		};
		ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
		ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
		ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&ea,
				    sizeof(ea), NETIO_EPP_SHM_OFF);
		if (ret < 0)
			return -EIO;
	}

	/*
	 * Start LIPP/LEPP.
	 */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
		pr_warning("Failed to start LIPP/LEPP.\n");
		return -EIO;
	}

	return 0;
}

/*
 * Register with hypervisor on the current CPU.
 *
 * Strangely, this function does important things even if it "fails",
 * which is especially common if the link is not up yet.  Hopefully
 * these things are all "harmless" if done twice!
 */
static void tile_net_register(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;

	struct tile_netio_queue *queue;

	/* Only network cpus can receive packets. */
	int queue_id =
		cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;

	netio_input_config_t config = {
		.flags = 0,
		.num_receive_packets = priv->network_cpus_credits,
		.queue_id = queue_id
	};

	int ret = 0;
	netio_queue_impl_t *queuep;

	PDEBUG("tile_net_register(queue_id %d)\n", queue_id);

	/* Select the per-cpu state matching this device name. */
	if (!strcmp(dev->name, "xgbe0"))
		info = &__get_cpu_var(hv_xgbe0);
	else if (!strcmp(dev->name, "xgbe1"))
		info = &__get_cpu_var(hv_xgbe1);
	else if (!strcmp(dev->name, "gbe0"))
		info = &__get_cpu_var(hv_gbe0);
	else if (!strcmp(dev->name, "gbe1"))
		info = &__get_cpu_var(hv_gbe1);
	else
		BUG();

	/* Initialize the egress timer. */
	init_timer(&info->egress_timer);
	info->egress_timer.data = (long)info;
	info->egress_timer.function = tile_net_handle_egress_timer;

	priv->cpu[my_cpu] = info;

	/*
	 * Register ourselves with LIPP.  This does a lot of stuff,
	 * including invoking the LIPP registration code.
	 */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0,
			    (HV_VirtAddr)&config,
			    sizeof(netio_input_config_t),
			    NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	if (ret < 0) {
		if (ret != NETIO_LINK_DOWN) {
			printk(KERN_DEBUG "hv_dev_pwrite "
			       "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
			       ret);
		}
		/* Remember link-down so the caller can retry later. */
		info->link_down = (ret == NETIO_LINK_DOWN);
		return;
	}

	/*
	 * Get the pointer to our queue's system part.
	 */

	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queuep,
			   sizeof(netio_queue_impl_t *),
			   NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	PDEBUG("queuep %p\n", queuep);
	if (ret <= 0) {
		/* ISSUE: Shouldn't this be a fatal error? */
		pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
		return;
	}

	queue = &info->queue;

	queue->__system_part = queuep;

	memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));

	/* This is traditionally "config.num_receive_packets / 2". */
	queue->__user_part.__receive_credit_interval = 4;
	queue->__user_part.__receive_credit_remaining =
		queue->__user_part.__receive_credit_interval;

	/*
	 * Get a fastio index from the hypervisor.
	 * ISSUE: Shouldn't this check the result?
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queue->__user_part.__fastio_index,
			   sizeof(queue->__user_part.__fastio_index),
			   NETIO_IPP_GET_FASTIO_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);

	/* Now we are registered. */
	info->registered = true;
}

/*
 * Deregister with hypervisor on the current CPU.
 *
 * This simply discards all our credits, so no more packets will be
 * delivered to this tile.  There may still be packets in our queue.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_deregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt.
 */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	{
		struct tile_netio_queue *queue = &info->queue;
		netio_queue_user_impl_t *qup = &queue->__user_part;

		/* Discard all our credits. */
		__netio_fastio_return_credits(qup->__fastio_index, -1);
	}
}

/*
 * Unregister with hypervisor on the current CPU.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_unregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	int ret;
	int dummy = 0;

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	/* Unregister ourselves with LIPP/LEPP. */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			    sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
	if (ret < 0)
		panic("Failed to unregister with LIPP/LEPP!\n");

	/* Discard all packets still in our NetIO queue. */
	tile_net_discard_packets(dev);

	/* Reset state. */
	info->num_needed_small_buffers = 0;
	info->num_needed_large_buffers = 0;

	/* Cancel egress timer. */
	del_timer(&info->egress_timer);
	info->egress_timer_scheduled = false;
}

/*
 * Helper function for "tile_net_stop()".
 *
 * Also used to handle registration failure in "tile_net_open_inner()",
 * when the various extra steps in "tile_net_stop()" are not necessary.
 */
static void tile_net_stop_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int i;

	int dummy = 0;

	/*
	 * Unregister all tiles, so LIPP will stop delivering packets.
	 * Also, delete all the "napi" objects (sequentially, to protect
	 * "dev->napi_list").
	 */
	on_each_cpu(tile_net_unregister, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info != NULL && info->registered) {
			netif_napi_del(&info->napi);
			info->registered = false;
		}
	}

	/* Stop LIPP/LEPP. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
		panic("Failed to stop LIPP/LEPP!\n");

	/* Allow a future open to re-initialize the IPP. */
	priv->partly_opened = 0;
}

/*
 * Disable NAPI for the given device on the current cpu.
 */
static void tile_net_stop_disable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable NAPI if needed. */
	if (info != NULL && info->napi_enabled) {
		napi_disable(&info->napi);
		info->napi_enabled = false;
	}
}

/*
 * Enable NAPI and the ingress interrupt for the given device
 * on the current cpu.
 *
 * ISSUE: Only do this for "network cpus"?
 */
static void tile_net_open_enable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Enable NAPI. */
	napi_enable(&info->napi);
	info->napi_enabled = true;

	/* Enable the ingress interrupt. */
	enable_percpu_irq(priv->intr_id);
}

/*
 * tile_net_open_inner does most of the work of bringing up the interface.
 * It's called from tile_net_open(), and also from tile_net_retry_open().
 * The return value is 0 if the interface was brought up, < 0 if
 * tile_net_open() should return the return value as an error, and > 0 if
 * tile_net_open() should return success and schedule a work item to
 * periodically retry the bringup.
 */
static int tile_net_open_inner(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;
	struct tile_netio_queue *queue;
	int result = 0;
	int i;
	int dummy = 0;

	/*
	 * First try to register just on the local CPU, and handle any
	 * semi-expected "link down" failure specially.  Note that we
	 * do NOT call "tile_net_stop_aux()", unlike below.
	 */
	tile_net_register(dev);
	info = priv->cpu[my_cpu];
	if (!info->registered) {
		if (info->link_down)
			return 1;
		return -EAGAIN;
	}

	/*
	 * Now register everywhere else.  If any registration fails,
	 * even for "link down" (which might not be possible), we
	 * clean up using "tile_net_stop_aux()".  Also, add all the
	 * "napi" objects (sequentially, to protect "dev->napi_list").
	 * ISSUE: Only use "netif_napi_add()" for "network cpus"?
	 */
	smp_call_function(tile_net_register, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info->registered)
			netif_napi_add(dev, &info->napi, tile_net_poll, 64);
		else
			result = -EAGAIN;
	}
	if (result != 0) {
		tile_net_stop_aux(dev);
		return result;
	}

	queue = &info->queue;

	if (priv->intr_id == 0) {
		unsigned int irq;

		/*
		 * Acquire the irq allocated by the hypervisor.  Every
		 * queue gets the same irq.  The "__intr_id" field is
		 * "1 << irq", so we use "__ffs()" to extract "irq".
		 */
		priv->intr_id = queue->__system_part->__intr_id;
		BUG_ON(priv->intr_id == 0);
		irq = __ffs(priv->intr_id);

		/*
		 * Register the ingress interrupt handler for this
		 * device, permanently.
		 *
		 * We used to call "free_irq()" in "tile_net_stop()",
		 * and then re-register the handler here every time,
		 * but that caused DNP errors in "handle_IRQ_event()"
		 * because "desc->action" was NULL.  See bug 9143.
		 */
		tile_irq_activate(irq, TILE_IRQ_PERCPU);
		BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
				   0, dev->name, (void *)dev) != 0);
	}

	{
		/* Allocate initial buffers. */

		int max_buffers =
			priv->network_cpus_count * priv->network_cpus_credits;

		info->num_needed_small_buffers =
			min(LIPP_SMALL_BUFFERS, max_buffers);

		info->num_needed_large_buffers =
			min(LIPP_LARGE_BUFFERS, max_buffers);

		tile_net_provide_needed_buffers(info);

		/* Bring-up requires the full initial complement. */
		if (info->num_needed_small_buffers != 0 ||
		    info->num_needed_large_buffers != 0)
			panic("Insufficient memory for buffer stack!");
	}

	/* We are about to be active. */
	priv->active = true;

	/* Make sure "active" is visible to all tiles. */
	mb();

	/* On each tile, enable NAPI and the ingress interrupt. */
	on_each_cpu(tile_net_open_enable, (void *)dev, 1);

	/* Start LIPP/LEPP and activate "ingress" at the shim. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
		panic("Failed to activate the LIPP Shim!\n");

	/* Start our transmit queue. */
	netif_start_queue(dev);

	return 0;
}

/*
 * Called periodically to retry bringing up the NetIO interface,
 * if it doesn't come up cleanly during tile_net_open().
 */
static void tile_net_open_retry(struct work_struct *w)
{
	struct delayed_work *dw = container_of(w, struct delayed_work, work);

	struct tile_net_priv *priv =
		container_of(dw, struct tile_net_priv, retry_work);

	/*
	 * Try to bring the NetIO interface up.  If it fails, reschedule
	 * ourselves to try again later; otherwise, tell Linux we now have
	 * a working link.  ISSUE: What if the return value is negative?
	 */
	if (tile_net_open_inner(priv->dev) != 0)
		schedule_delayed_work(&priv->retry_work,
				      TILE_NET_RETRY_INTERVAL);
	else
		netif_carrier_on(priv->dev);
}

/*
 * Called when a network interface is made active.
 *
 * Returns 0 on success, negative value on failure.
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).
 At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS (if needed), the watchdog timer
 * is started, and the stack is notified that the interface is ready.
 *
 * If the actual link is not available yet, then we tell Linux that
 * we have no carrier, and we keep checking until the link comes up.
 */
static int tile_net_open(struct net_device *dev)
{
	int ret = 0;
	struct tile_net_priv *priv = netdev_priv(dev);

	/*
	 * We rely on priv->partly_opened to tell us if this is the
	 * first time this interface is being brought up.  If it is
	 * set, the IPP was already initialized and should not be
	 * initialized again.
	 */
	if (!priv->partly_opened) {

		int count;
		int credits;

		/* Initialize LIPP/LEPP, and start the Shim. */
		ret = tile_net_open_aux(dev);
		if (ret < 0) {
			pr_err("tile_net_open_aux failed: %d\n", ret);
			return ret;
		}

		/* Analyze the network cpus. */

		if (network_cpus_used)
			cpumask_copy(&priv->network_cpus_map,
				     &network_cpus_map);
		else
			cpumask_copy(&priv->network_cpus_map, cpu_online_mask);


		count = cpumask_weight(&priv->network_cpus_map);

		/* Limit credits to available buffers, and apply min. */
		/* The "& ~1" keeps the per-cpu credit count even. */
		credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);

		/* Apply "GBE" max limit. */
		/* ISSUE: Use higher limit for XGBE? */
		credits = min(NETIO_MAX_RECEIVE_PKTS, credits);

		priv->network_cpus_count = count;
		priv->network_cpus_credits = credits;

#ifdef TILE_NET_DEBUG
		pr_info("Using %d network cpus, with %d credits each\n",
		       priv->network_cpus_count, priv->network_cpus_credits);
#endif

		priv->partly_opened = 1;

	} else {
		/* FIXME: Is this possible? */
		/* printk("Already partly opened.\n"); */
	}

	/*
	 * Attempt to bring up the link.
	 */
	ret = tile_net_open_inner(dev);
	if (ret <= 0) {
		if (ret == 0)
			netif_carrier_on(dev);
		return ret;
	}

	/*
	 * We were unable to bring up the NetIO interface, but we want to
	 * try again in a little bit.  Tell Linux that we have no carrier
	 * so it doesn't try to use the interface before the link comes up
	 * and then remember to try again later.
	 */
	netif_carrier_off(dev);
	schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);

	return 0;
}

/*
 * Reclaim every buffer LIPP is currently holding, freeing the
 * associated skbs.  Returns the number of buffers drained.
 */
static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
{
	int n = 0;

	/* Drain all the LIPP buffers. */
	while (true) {
		int buffer;

		/* NOTE: This should never fail. */
		if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
				 sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
			break;

		/* Stop when done. */
		if (buffer == 0)
			break;

		{
			/* Convert "linux_buffer_t" to "va". */
			void *va = __va((phys_addr_t)(buffer >> 1) << 7);

			/* Acquire the associated "skb". */
			struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
			struct sk_buff *skb = *skb_ptr;

			kfree_skb(skb);
		}

		n++;
	}

	return n;
}

/*
 * Disables a network interface.
 *
 * Returns 0, this is not allowed to fail.
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 *
 * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"?
 *
 * Before we are called by "__dev_close()", "netif_running()" will
 * have been cleared, so no NEW calls to "tile_net_poll()" will be
 * made by "netpoll_poll_dev()".
 *
 * Often, this can cause some tiles to still have packets in their
 * queues, so we must call "tile_net_discard_packets()" later.
 *
 * Note that some other tile may still be INSIDE "tile_net_poll()",
 * and in fact, many will be, if there is heavy network load.
 *
 * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when
 * any tile is still "napi_schedule()"'d will induce a horrible crash
 * when "msleep()" is called.  This includes tiles which are inside
 * "tile_net_poll()" which have not yet called "napi_complete()".
* * So, we must first try to wait long enough for other tiles to finish * with any current "tile_net_poll()" call, and, hopefully, to clear * the "scheduled" flag. ISSUE: It is unclear what happens to tiles * which have called "napi_schedule()" but which had not yet tried to * call "tile_net_poll()", or which exhausted their budget inside * "tile_net_poll()" just before this function was called. */ static int tile_net_stop(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); PDEBUG("tile_net_stop()\n"); /* Start discarding packets. */ priv->active = false; /* Make sure "active" is visible to all tiles. */ mb(); /* * On each tile, make sure no NEW packets get delivered, and * disable the ingress interrupt. * * Note that the ingress interrupt can fire AFTER this, * presumably due to packets which were recently delivered, * but it will have no effect. */ on_each_cpu(tile_net_deregister, (void *)dev, 1); /* Optimistically drain LIPP buffers. */ (void)tile_net_drain_lipp_buffers(priv); /* ISSUE: Only needed if not yet fully open. */ cancel_delayed_work_sync(&priv->retry_work); /* Can't transmit any more. */ netif_stop_queue(dev); /* Disable NAPI on each tile. */ on_each_cpu(tile_net_stop_disable, (void *)dev, 1); /* * Drain any remaining LIPP buffers. NOTE: This "printk()" * has never been observed, but in theory it could happen. */ if (tile_net_drain_lipp_buffers(priv) != 0) printk("Had to drain some extra LIPP buffers!\n"); /* Stop LIPP/LEPP. */ tile_net_stop_aux(dev); /* * ISSUE: It appears that, in practice anyway, by the time we * get here, there are no pending completions, but just in case, * we free (all of) them anyway. */ while (tile_net_lepp_free_comps(dev, true)) /* loop */; /* Wipe the EPP queue, and wait till the stores hit the EPP. */ memset(priv->eq, 0, sizeof(lepp_queue_t)); mb(); return 0; } /* * Prepare the "frags" info for the resulting LEPP command. * * If needed, flush the memory used by the frags. 
 */
static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
				       struct sk_buff *skb,
				       void *b_data, unsigned int b_len)
{
	unsigned int i, n = 0;

	struct skb_shared_info *sh = skb_shinfo(skb);

	phys_addr_t cpa;

	/* First frag: the linear ("head") data, if any. */
	if (b_len != 0) {

		if (!hash_default)
			finv_buffer_remote(b_data, b_len, 0);

		cpa = __pa(b_data);
		/* LEPP takes the physical address split into lo/hi words. */
		frags[n].cpa_lo = cpa;
		frags[n].cpa_hi = cpa >> 32;
		frags[n].length = b_len;
		frags[n].hash_for_home = hash_default;
		n++;
	}

	/* Then one LEPP frag per paged fragment. */
	for (i = 0; i < sh->nr_frags; i++) {

		skb_frag_t *f = &sh->frags[i];
		unsigned long pfn = page_to_pfn(f->page);

		/* FIXME: Compute "hash_for_home" properly. */
		/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
		int hash_for_home = hash_default;

		/* FIXME: Hmmm. */
		if (!hash_default) {
			void *va = pfn_to_kaddr(pfn) + f->page_offset;
			BUG_ON(PageHighMem(f->page));
			finv_buffer_remote(va, f->size, 0);
		}

		cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
		frags[n].cpa_lo = cpa;
		frags[n].cpa_hi = cpa >> 32;
		frags[n].length = f->size;
		frags[n].hash_for_home = hash_for_home;
		n++;
	}

	return n;
}

/*
 * This function takes "skb", consisting of a header template and a
 * payload, and hands it to LEPP, to emit as one or more segments,
 * each consisting of a possibly modified header, plus a piece of the
 * payload, via a process known as "tcp segmentation offload".
 *
 * Usually, "data" will contain the header template, of size "sh_len",
 * and "sh->frags" will contain "skb->data_len" bytes of payload, and
 * there will be "sh->gso_segs" segments.
 *
 * Sometimes, if "sendfile()" requires copying, we will be called with
 * "data" containing the header and payload, with "frags" being empty.
 *
 * In theory, "sh->nr_frags" could be 3, but in practice, it seems
 * that this will never actually happen.
 *
 * See "emulate_large_send_offload()" for some reference code, which
 * does not handle checksumming.
 *
 * ISSUE: How do we make sure that high memory DMA does not migrate?
 */
static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_net_stats_t *stats = &info->stats;

	struct skb_shared_info *sh = skb_shinfo(skb);

	unsigned char *data = skb->data;

	/* The ip header follows the ethernet header. */
	struct iphdr *ih = ip_hdr(skb);
	unsigned int ih_len = ih->ihl * 4;

	/* Note that "nh == ih", by definition. */
	unsigned char *nh = skb_network_header(skb);
	unsigned int eh_len = nh - data;

	/* The tcp header follows the ip header. */
	struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
	unsigned int th_len = th->doff * 4;

	/* The total number of header bytes. */
	/* NOTE: This may be less than skb_headlen(skb). */
	unsigned int sh_len = eh_len + ih_len + th_len;

	/* The number of payload bytes at "skb->data + sh_len". */
	/* This is non-zero for sendfile() without HIGHDMA. */
	unsigned int b_len = skb_headlen(skb) - sh_len;

	/* The total number of payload bytes. */
	unsigned int d_len = b_len + skb->data_len;

	/* The maximum payload size. */
	unsigned int p_len = sh->gso_size;

	/* The total number of segments. */
	unsigned int num_segs = sh->gso_segs;

	/* The temporary copy of the command. */
	u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
	lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;

	/* Analyze the "frags". */
	unsigned int num_frags =
		tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);

	/* The size of the command, including frags and header. */
	size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);

	/* The command header. */
	lepp_tso_cmd_t cmd_init = {
		.tso = true,
		.header_size = sh_len,
		.ip_offset = eh_len,
		.tcp_offset = eh_len + ih_len,
		.payload_size = p_len,
		.num_frags = num_frags,
	};

	unsigned long irqflags;

	lepp_queue_t *eq = priv->eq;

	struct sk_buff *olds[8];
	unsigned int wanted = 8;
	unsigned int i, nolds = 0;

	unsigned int cmd_head, cmd_tail, cmd_next;
	unsigned int comp_tail;


	/* Paranoia. */
	BUG_ON(skb->protocol != htons(ETH_P_IP));
	BUG_ON(ih->protocol != IPPROTO_TCP);
	BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
	BUG_ON(num_frags > LEPP_MAX_FRAGS);
	/*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
	BUG_ON(num_segs <= 1);


	/* Finish preparing the command. */

	/* Copy the command header. */
	*cmd = cmd_init;

	/* Copy the "header". */
	memcpy(&cmd->frags[num_frags], data, sh_len);


	/* Prefetch and wait, to minimize time spent holding the spinlock. */
	prefetch_L1(&eq->comp_tail);
	prefetch_L1(&eq->cmd_tail);
	mb();


	/* Enqueue the command. */

	spin_lock_irqsave(&priv->eq_lock, irqflags);

	/*
	 * Handle completions if needed to make room.
	 * HACK: Spin until there is sufficient room.
	 */
	if (lepp_num_free_comp_slots(eq) == 0) {
		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
		if (nolds == 0) {
busy:
			spin_unlock_irqrestore(&priv->eq_lock, irqflags);
			return NETDEV_TX_BUSY;
		}
	}

	cmd_head = eq->cmd_head;
	cmd_tail = eq->cmd_tail;

	/* Prepare to advance, detecting full queue. */
	cmd_next = cmd_tail + cmd_size;
	if (cmd_tail < cmd_head && cmd_next >= cmd_head)
		goto busy;
	/* Wrap the command ring if this command would run off the end. */
	if (cmd_next > LEPP_CMD_LIMIT) {
		cmd_next = 0;
		if (cmd_next == cmd_head)
			goto busy;
	}

	/* Copy the command. */
	memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);

	/* Advance. */
	cmd_tail = cmd_next;

	/* Record "skb" for eventual freeing. */
	comp_tail = eq->comp_tail;
	eq->comps[comp_tail] = skb;
	LEPP_QINC(comp_tail);
	eq->comp_tail = comp_tail;

	/* Flush before allowing LEPP to handle the command. */
	/* ISSUE: Is this the optimal location for the flush? */
	__insn_mf();

	eq->cmd_tail = cmd_tail;

	/* NOTE: Using "4" here is more efficient than "0" or "2", */
	/* and, strangely, more efficient than pre-checking the number */
	/* of available completions, and comparing it to 4. */
	if (nolds == 0)
		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);

	spin_unlock_irqrestore(&priv->eq_lock, irqflags);

	/* Handle completions. */
	for (i = 0; i < nolds; i++)
		kfree_skb(olds[i]);

	/* Update stats. */
	stats->tx_packets += num_segs;
	stats->tx_bytes += (num_segs * sh_len) + d_len;

	/* Make sure the egress timer is scheduled. */
	tile_net_schedule_egress_timer(info);

	return NETDEV_TX_OK;
}

/*
 * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
 */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_net_stats_t *stats = &info->stats;

	unsigned long irqflags;

	struct skb_shared_info *sh = skb_shinfo(skb);

	unsigned int len = skb->len;
	unsigned char *data = skb->data;

	unsigned int csum_start = skb_checksum_start_offset(skb);

	lepp_frag_t frags[LEPP_MAX_FRAGS];

	unsigned int num_frags;

	lepp_queue_t *eq = priv->eq;

	struct sk_buff *olds[8];
	unsigned int wanted = 8;
	unsigned int i, nolds = 0;

	unsigned int cmd_size = sizeof(lepp_cmd_t);

	unsigned int cmd_head, cmd_tail, cmd_next;
	unsigned int comp_tail;

	lepp_cmd_t cmds[LEPP_MAX_FRAGS];


	/*
	 * This is paranoia, since we think that if the link doesn't come
	 * up, telling Linux we have no carrier will keep it from trying
	 * to transmit.  If it does, though, we can't execute this routine,
	 * since data structures we depend on aren't set up yet.
	 */
	if (!info->registered)
		return NETDEV_TX_BUSY;


	/* Save the timestamp. */
	dev->trans_start = jiffies;


#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default) {
		HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
		if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
			panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
			      data, hv_pte_get_mode(pte), hv_pte_val(pte));
	}
#endif
#endif


#ifdef TILE_NET_DUMP_PACKETS
	/* ISSUE: Does not dump the "frags". */
	dump_packet(data, skb_headlen(skb), "tx");
#endif /* TILE_NET_DUMP_PACKETS */


	/* GSO packets take the TSO path instead. */
	if (sh->gso_size != 0)
		return tile_net_tx_tso(skb, dev);


	/* Prepare the commands.
*/ num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); for (i = 0; i < num_frags; i++) { bool final = (i == num_frags - 1); lepp_cmd_t cmd = { .cpa_lo = frags[i].cpa_lo, .cpa_hi = frags[i].cpa_hi, .length = frags[i].length, .hash_for_home = frags[i].hash_for_home, .send_completion = final, .end_of_packet = final }; if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { cmd.compute_checksum = 1; cmd.checksum_data.bits.start_byte = csum_start; cmd.checksum_data.bits.count = len - csum_start; cmd.checksum_data.bits.destination_byte = csum_start + skb->csum_offset; } cmds[i] = cmd; } /* Prefetch and wait, to minimize time spent holding the spinlock. */ prefetch_L1(&eq->comp_tail); prefetch_L1(&eq->cmd_tail); mb(); /* Enqueue the commands. */ spin_lock_irqsave(&priv->eq_lock, irqflags); /* * Handle completions if needed to make room. * HACK: Spin until there is sufficient room. */ if (lepp_num_free_comp_slots(eq) == 0) { nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); if (nolds == 0) { busy: spin_unlock_irqrestore(&priv->eq_lock, irqflags); return NETDEV_TX_BUSY; } } cmd_head = eq->cmd_head; cmd_tail = eq->cmd_tail; /* Copy the commands, or fail. */ for (i = 0; i < num_frags; i++) { /* Prepare to advance, detecting full queue. */ cmd_next = cmd_tail + cmd_size; if (cmd_tail < cmd_head && cmd_next >= cmd_head) goto busy; if (cmd_next > LEPP_CMD_LIMIT) { cmd_next = 0; if (cmd_next == cmd_head) goto busy; } /* Copy the command. */ *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; /* Advance. */ cmd_tail = cmd_next; } /* Record "skb" for eventual freeing. */ comp_tail = eq->comp_tail; eq->comps[comp_tail] = skb; LEPP_QINC(comp_tail); eq->comp_tail = comp_tail; /* Flush before allowing LEPP to handle the command. */ /* ISSUE: Is this the optimal location for the flush? 
*/ __insn_mf(); eq->cmd_tail = cmd_tail; /* NOTE: Using "4" here is more efficient than "0" or "2", */ /* and, strangely, more efficient than pre-checking the number */ /* of available completions, and comparing it to 4. */ if (nolds == 0) nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); spin_unlock_irqrestore(&priv->eq_lock, irqflags); /* Handle completions. */ for (i = 0; i < nolds; i++) kfree_skb(olds[i]); /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ stats->tx_packets++; stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); /* Make sure the egress timer is scheduled. */ tile_net_schedule_egress_timer(info); return NETDEV_TX_OK; } /* * Deal with a transmit timeout. */ static void tile_net_tx_timeout(struct net_device *dev) { PDEBUG("tile_net_tx_timeout()\n"); PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, jiffies - dev->trans_start); /* XXX: ISSUE: This doesn't seem useful for us. */ netif_wake_queue(dev); } /* * Ioctl commands. */ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { return -EOPNOTSUPP; } /* * Get System Network Statistics. * * Returns the address of the device statistics structure. */ static struct net_device_stats *tile_net_get_stats(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); u32 rx_packets = 0; u32 tx_packets = 0; u32 rx_bytes = 0; u32 tx_bytes = 0; int i; for_each_online_cpu(i) { if (priv->cpu[i]) { rx_packets += priv->cpu[i]->stats.rx_packets; rx_bytes += priv->cpu[i]->stats.rx_bytes; tx_packets += priv->cpu[i]->stats.tx_packets; tx_bytes += priv->cpu[i]->stats.tx_bytes; } } priv->stats.rx_packets = rx_packets; priv->stats.rx_bytes = rx_bytes; priv->stats.tx_packets = tx_packets; priv->stats.tx_bytes = tx_bytes; return &priv->stats; } /* * Change the "mtu". * * The "change_mtu" method is usually not needed. * If you need it, it must be like this. 
*/ static int tile_net_change_mtu(struct net_device *dev, int new_mtu) { PDEBUG("tile_net_change_mtu()\n"); /* Check ranges. */ if ((new_mtu < 68) || (new_mtu > 1500)) return -EINVAL; /* Accept the value. */ dev->mtu = new_mtu; return 0; } /* * Change the Ethernet Address of the NIC. * * The hypervisor driver does not support changing MAC address. However, * the IPP does not do anything with the MAC address, so the address which * gets used on outgoing packets, and which is accepted on incoming packets, * is completely up to the NetIO program or kernel driver which is actually * handling them. * * Returns 0 on success, negative on failure. */ static int tile_net_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; /* ISSUE: Note that "dev_addr" is now a pointer. */ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); return 0; } /* * Obtain the MAC address from the hypervisor. * This must be done before opening the device. */ static int tile_net_get_mac(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); char hv_dev_name[32]; int len; __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; int ret; /* For example, "xgbe0". */ strcpy(hv_dev_name, dev->name); len = strlen(hv_dev_name); /* For example, "xgbe/0". */ hv_dev_name[len] = hv_dev_name[len - 1]; hv_dev_name[len - 1] = '/'; len++; /* For example, "xgbe/0/native_hash". */ strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); /* Get the hypervisor handle for this device. 
*/ priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); PDEBUG("hv_dev_open(%s) returned %d %p\n", hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); if (priv->hv_devhdl < 0) { if (priv->hv_devhdl == HV_ENODEV) printk(KERN_DEBUG "Ignoring unconfigured device %s\n", hv_dev_name); else printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", hv_dev_name, priv->hv_devhdl); return -1; } /* * Read the hardware address from the hypervisor. * ISSUE: Note that "dev_addr" is now a pointer. */ offset.bits.class = NETIO_PARAM; offset.bits.addr = NETIO_PARAM_MAC; ret = hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)dev->dev_addr, dev->addr_len, offset.word); PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); if (ret <= 0) { printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", dev->name); /* * Since the device is configured by the hypervisor but we * can't get its MAC address, we are most likely running * the simulator, so let's generate a random MAC address. */ random_ether_addr(dev->dev_addr); } return 0; } static struct net_device_ops tile_net_ops = { .ndo_open = tile_net_open, .ndo_stop = tile_net_stop, .ndo_start_xmit = tile_net_tx, .ndo_do_ioctl = tile_net_ioctl, .ndo_get_stats = tile_net_get_stats, .ndo_change_mtu = tile_net_change_mtu, .ndo_tx_timeout = tile_net_tx_timeout, .ndo_set_mac_address = tile_net_set_mac_address }; /* * The setup function. * * This uses ether_setup() to assign various fields in dev, including * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. */ static void tile_net_setup(struct net_device *dev) { PDEBUG("tile_net_setup()\n"); ether_setup(dev); dev->netdev_ops = &tile_net_ops; dev->watchdog_timeo = TILE_NET_TIMEOUT; /* We want lockless xmit. */ dev->features |= NETIF_F_LLTX; /* We support hardware tx checksums. */ dev->features |= NETIF_F_HW_CSUM; /* We support scatter/gather. */ dev->features |= NETIF_F_SG; /* We support TSO. */ dev->features |= NETIF_F_TSO; #ifdef TILE_NET_GSO /* We support GSO. 
*/ dev->features |= NETIF_F_GSO; #endif if (hash_default) dev->features |= NETIF_F_HIGHDMA; /* ISSUE: We should support NETIF_F_UFO. */ dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; dev->mtu = TILE_NET_MTU; } /* * Allocate the device structure, register the device, and obtain the * MAC address from the hypervisor. */ static struct net_device *tile_net_dev_init(const char *name) { int ret; struct net_device *dev; struct tile_net_priv *priv; /* * Allocate the device structure. This allocates "priv", calls * tile_net_setup(), and saves "name". Normally, "name" is a * template, instantiated by register_netdev(), but not for us. */ dev = alloc_netdev(sizeof(*priv), name, tile_net_setup); if (!dev) { pr_err("alloc_netdev(%s) failed\n", name); return NULL; } priv = netdev_priv(dev); /* Initialize "priv". */ memset(priv, 0, sizeof(*priv)); /* Save "dev" for "tile_net_open_retry()". */ priv->dev = dev; INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); spin_lock_init(&priv->eq_lock); /* Allocate "eq". */ priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER); if (!priv->eq_pages) { free_netdev(dev); return NULL; } priv->eq = page_address(priv->eq_pages); /* Register the network device. */ ret = register_netdev(dev); if (ret) { pr_err("register_netdev %s failed %d\n", dev->name, ret); __free_pages(priv->eq_pages, EQ_ORDER); free_netdev(dev); return NULL; } /* Get the MAC address. */ ret = tile_net_get_mac(dev); if (ret < 0) { unregister_netdev(dev); __free_pages(priv->eq_pages, EQ_ORDER); free_netdev(dev); return NULL; } return dev; } /* * Module cleanup. * * FIXME: If compiled as a module, this module cannot be "unloaded", * because the "ingress interrupt handler" is registered permanently. 
*/ static void tile_net_cleanup(void) { int i; for (i = 0; i < TILE_NET_DEVS; i++) { if (tile_net_devs[i]) { struct net_device *dev = tile_net_devs[i]; struct tile_net_priv *priv = netdev_priv(dev); unregister_netdev(dev); finv_buffer_remote(priv->eq, EQ_SIZE, 0); __free_pages(priv->eq_pages, EQ_ORDER); free_netdev(dev); } } } /* * Module initialization. */ static int tile_net_init_module(void) { pr_info("Tilera IPP Net Driver\n"); tile_net_devs[0] = tile_net_dev_init("xgbe0"); tile_net_devs[1] = tile_net_dev_init("xgbe1"); tile_net_devs[2] = tile_net_dev_init("gbe0"); tile_net_devs[3] = tile_net_dev_init("gbe1"); return 0; } module_init(tile_net_init_module); module_exit(tile_net_cleanup); #ifndef MODULE /* * The "network_cpus" boot argument specifies the cpus that are dedicated * to handle ingress packets. * * The parameter should be in the form "network_cpus=m-n[,x-y]", where * m, n, x, y are integer numbers that represent the cpus that can be * neither a dedicated cpu nor a dataplane cpu. */ static int __init network_cpus_setup(char *str) { int rc = cpulist_parse_crop(str, &network_cpus_map); if (rc != 0) { pr_warning("network_cpus=%s: malformed cpu list\n", str); } else { /* Remove dedicated cpus. */ cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); if (cpumask_empty(&network_cpus_map)) { pr_warning("Ignoring network_cpus='%s'.\n", str); } else { char buf[1024]; cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); pr_info("Linux network CPUs: %s\n", buf); network_cpus_used = true; } } return 0; } __setup("network_cpus=", network_cpus_setup); #endif
gpl-2.0
dlumberg/kernel_asus_tf101
drivers/net/wireless/bcm4329/bcmsdstd_linux.c
4040
6395
/* * 'Standard' SDIO HOST CONTROLLER driver - linux portion * * Copyright (C) 1999-2010, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: bcmsdstd_linux.c,v 1.11.18.2.16.1 2010/08/17 17:03:13 Exp $ */ #include <typedefs.h> #include <pcicfg.h> #include <bcmutils.h> #include <sdio.h> /* SDIO Specs */ #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ #include <sdiovar.h> /* to get msglevel bit values */ #include <linux/sched.h> /* request_irq() */ #include <bcmsdstd.h> struct sdos_info { sdioh_info_t *sd; spinlock_t lock; wait_queue_head_t intr_wait_queue; }; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) #define BLOCKABLE() (!in_atomic()) #else #define BLOCKABLE() (!in_interrupt()) #endif /* Interrupt handler */ static irqreturn_t sdstd_isr(int irq, void *dev_id #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) , struct pt_regs *ptregs #endif ) { sdioh_info_t *sd; struct sdos_info *sdos; bool ours; sd = (sdioh_info_t *)dev_id; if (!sd->card_init_done) { sd_err(("%s: Hey Bogus intr...not even initted: irq %d\n", __FUNCTION__, irq)); return IRQ_RETVAL(FALSE); } else { ours = check_client_intr(sd); /* For local interrupts, wake the waiting process */ if (ours && sd->got_hcint) { sd_trace(("INTR->WAKE\n")); sdos = (struct sdos_info *)sd->sdos_info; wake_up_interruptible(&sdos->intr_wait_queue); } return IRQ_RETVAL(ours); } } /* Register with Linux for interrupts */ int sdstd_register_irq(sdioh_info_t *sd, uint irq) { sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq)); if (request_irq(irq, sdstd_isr, IRQF_SHARED, "bcmsdstd", sd) < 0) { sd_err(("%s: request_irq() failed\n", __FUNCTION__)); return ERROR; } return SUCCESS; } /* Free Linux irq */ void sdstd_free_irq(uint irq, sdioh_info_t *sd) { free_irq(irq, sd); } /* Map Host controller registers */ uint32 * sdstd_reg_map(osl_t *osh, int32 addr, int size) { return (uint32 *)REG_MAP(addr, size); } void sdstd_reg_unmap(osl_t *osh, int32 addr, int size) { REG_UNMAP((void*)(uintptr)addr); } int sdstd_osinit(sdioh_info_t *sd) { struct sdos_info *sdos; sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info)); 
sd->sdos_info = (void*)sdos; if (sdos == NULL) return BCME_NOMEM; sdos->sd = sd; spin_lock_init(&sdos->lock); init_waitqueue_head(&sdos->intr_wait_queue); return BCME_OK; } void sdstd_osfree(sdioh_info_t *sd) { struct sdos_info *sdos; ASSERT(sd && sd->sdos_info); sdos = (struct sdos_info *)sd->sdos_info; MFREE(sd->osh, sdos, sizeof(struct sdos_info)); } /* Interrupt enable/disable */ SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *sd, bool enable) { ulong flags; struct sdos_info *sdos; sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling")); sdos = (struct sdos_info *)sd->sdos_info; ASSERT(sdos); if (!(sd->host_init_done && sd->card_init_done)) { sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__)); return SDIOH_API_RC_FAIL; } if (enable && !(sd->intr_handler && sd->intr_handler_arg)) { sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__)); return SDIOH_API_RC_FAIL; } /* Ensure atomicity for enable/disable calls */ spin_lock_irqsave(&sdos->lock, flags); sd->client_intr_enabled = enable; if (enable && !sd->lockcount) sdstd_devintr_on(sd); else sdstd_devintr_off(sd); spin_unlock_irqrestore(&sdos->lock, flags); return SDIOH_API_RC_SUCCESS; } /* Protect against reentrancy (disable device interrupts while executing) */ void sdstd_lock(sdioh_info_t *sd) { ulong flags; struct sdos_info *sdos; sdos = (struct sdos_info *)sd->sdos_info; ASSERT(sdos); sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount)); spin_lock_irqsave(&sdos->lock, flags); if (sd->lockcount) { sd_err(("%s: Already locked! 
called from %p\n", __FUNCTION__, __builtin_return_address(0))); ASSERT(sd->lockcount == 0); } sdstd_devintr_off(sd); sd->lockcount++; spin_unlock_irqrestore(&sdos->lock, flags); } /* Enable client interrupt */ void sdstd_unlock(sdioh_info_t *sd) { ulong flags; struct sdos_info *sdos; sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled)); ASSERT(sd->lockcount > 0); sdos = (struct sdos_info *)sd->sdos_info; ASSERT(sdos); spin_lock_irqsave(&sdos->lock, flags); if (--sd->lockcount == 0 && sd->client_intr_enabled) { sdstd_devintr_on(sd); } spin_unlock_irqrestore(&sdos->lock, flags); } uint16 sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield) { struct sdos_info *sdos; sdos = (struct sdos_info *)sd->sdos_info; #ifndef BCMSDYIELD ASSERT(!yield); #endif sd_trace(("%s: int 0x%02x err 0x%02x yield %d canblock %d\n", __FUNCTION__, norm, err, yield, BLOCKABLE())); /* Clear the "interrupt happened" flag and last intrstatus */ sd->got_hcint = FALSE; sd->last_intrstatus = 0; #ifdef BCMSDYIELD if (yield && BLOCKABLE()) { /* Enable interrupts, wait for the indication, then disable */ sdstd_intrs_on(sd, norm, err); wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint)); sdstd_intrs_off(sd, norm, err); } else #endif /* BCMSDYIELD */ { sdstd_spinbits(sd, norm, err); } sd_trace(("%s: last_intrstatus 0x%04x\n", __FUNCTION__, sd->last_intrstatus)); return sd->last_intrstatus; }
gpl-2.0
sztena/DG08_android4.2
drivers/usb/serial/hp4x.c
4296
2074
/* * HP4x Calculators Serial USB driver * * Copyright (C) 2005 Arthur Huillet (ahuillet@users.sf.net) * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * See Documentation/usb/usb-serial.txt for more information on using this * driver */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> /* * Version Information */ #define DRIVER_VERSION "v1.00" #define DRIVER_DESC "HP4x (48/49) Generic Serial driver" #define HP_VENDOR_ID 0x03f0 #define HP49GP_PRODUCT_ID 0x0121 static const struct usb_device_id id_table[] = { { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_driver hp49gp_driver = { .name = "hp4X", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table, .no_dynamic_id = 1, }; static struct usb_serial_driver hp49gp_device = { .driver = { .owner = THIS_MODULE, .name = "hp4X", }, .id_table = id_table, .usb_driver = &hp49gp_driver, .num_ports = 1, }; static int __init hp49gp_init(void) { int retval; retval = usb_serial_register(&hp49gp_device); if (retval) goto failed_usb_serial_register; retval = usb_register(&hp49gp_driver); if (retval) goto failed_usb_register; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; failed_usb_register: usb_serial_deregister(&hp49gp_device); failed_usb_serial_register: return retval; } static void __exit hp49gp_exit(void) { usb_deregister(&hp49gp_driver); usb_serial_deregister(&hp49gp_device); } module_init(hp49gp_init); module_exit(hp49gp_exit); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
Rockr172/villeu-4.2
arch/mips/netlogic/xlr/platform.c
4552
2401
/* * Copyright 2011, Netlogic Microsystems. * Copyright 2004, Matt Porter <mporter@kernel.crashing.org> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/resource.h> #include <linux/serial_8250.h> #include <linux/serial_reg.h> #include <asm/netlogic/haldefs.h> #include <asm/netlogic/xlr/iomap.h> #include <asm/netlogic/xlr/pic.h> #include <asm/netlogic/xlr/xlr.h> unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset) { uint64_t uartbase; unsigned int value; /* sign extend to 64 bits, if needed */ uartbase = (uint64_t)(long)p->membase; value = nlm_read_reg(uartbase, offset); /* See XLR/XLS errata */ if (offset == UART_MSR) value ^= 0xF0; else if (offset == UART_MCR) value ^= 0x3; return value; } void nlm_xlr_uart_out(struct uart_port *p, int offset, int value) { uint64_t uartbase; /* sign extend to 64 bits, if needed */ uartbase = (uint64_t)(long)p->membase; /* See XLR/XLS errata */ if (offset == UART_MSR) value ^= 0xF0; else if (offset == UART_MCR) value ^= 0x3; nlm_write_reg(uartbase, offset, value); } #define PORT(_irq) \ { \ .irq = _irq, \ .regshift = 2, \ .iotype = UPIO_MEM32, \ .flags = (UPF_SKIP_TEST | \ UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\ .uartclk = PIC_CLKS_PER_SEC, \ .type = PORT_16550A, \ .serial_in = nlm_xlr_uart_in, \ .serial_out = nlm_xlr_uart_out, \ } static struct plat_serial8250_port xlr_uart_data[] = { PORT(PIC_UART_0_IRQ), PORT(PIC_UART_1_IRQ), {}, }; static struct platform_device uart_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = xlr_uart_data, }, }; static int __init nlm_uart_init(void) { unsigned long uartbase; uartbase = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET); xlr_uart_data[0].membase = (void __iomem *)uartbase; 
xlr_uart_data[0].mapbase = CPHYSADDR(uartbase); uartbase = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_1_OFFSET); xlr_uart_data[1].membase = (void __iomem *)uartbase; xlr_uart_data[1].mapbase = CPHYSADDR(uartbase); return platform_device_register(&uart_device); } arch_initcall(nlm_uart_init);
gpl-2.0
davidmueller13/valexKernel-lt03wifi
arch/mips/sgi-ip27/ip27-memory.c
4552
12031
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2000 by Silicon Graphics, Inc. * Copyright (C) 2004 by Christoph Hellwig * * On SGI IP27 the ARC memory configuration data is completly bogus but * alternate easier to use mechanisms are available. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/module.h> #include <linux/nodemask.h> #include <linux/swap.h> #include <linux/bootmem.h> #include <linux/pfn.h> #include <linux/highmem.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/sn/arch.h> #include <asm/sn/hub.h> #include <asm/sn/klconfig.h> #include <asm/sn/sn_private.h> #define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT) #define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT) struct node_data *__node_data[MAX_COMPACT_NODES]; EXPORT_SYMBOL(__node_data); static int fine_mode; static int is_fine_dirmode(void) { return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE); } static hubreg_t get_region(cnodeid_t cnode) { if (fine_mode) return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT; else return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT; } static hubreg_t region_mask; static void gen_region_mask(hubreg_t *region_mask) { cnodeid_t cnode; (*region_mask) = 0; for_each_online_node(cnode) { (*region_mask) |= 1ULL << get_region(cnode); } } #define rou_rflag rou_flags static int router_distance; static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth) { klrou_t *router; lboard_t *brd; int port; if (router_a->rou_rflag == 1) return; if (depth >= router_distance) return; router_a->rou_rflag = 1; for (port = 1; port <= MAX_ROUTER_PORTS; port++) { if 
(router_a->rou_port[port].port_nasid == INVALID_NASID) continue; brd = (lboard_t *)NODE_OFFSET_TO_K0( router_a->rou_port[port].port_nasid, router_a->rou_port[port].port_offset); if (brd->brd_type == KLTYPE_ROUTER) { router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); if (router == router_b) { if (depth < router_distance) router_distance = depth; } else router_recurse(router, router_b, depth + 1); } } router_a->rou_rflag = 0; } unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) { klrou_t *router, *router_a = NULL, *router_b = NULL; lboard_t *brd, *dest_brd; cnodeid_t cnode; nasid_t nasid; int port; /* Figure out which routers nodes in question are connected to */ for_each_online_node(cnode) { nasid = COMPACT_TO_NASID_NODEID(cnode); if (nasid == -1) continue; brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_ROUTER); if (!brd) continue; do { if (brd->brd_flags & DUPLICATE_BOARD) continue; router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); router->rou_rflag = 0; for (port = 1; port <= MAX_ROUTER_PORTS; port++) { if (router->rou_port[port].port_nasid == INVALID_NASID) continue; dest_brd = (lboard_t *)NODE_OFFSET_TO_K0( router->rou_port[port].port_nasid, router->rou_port[port].port_offset); if (dest_brd->brd_type == KLTYPE_IP27) { if (dest_brd->brd_nasid == nasid_a) router_a = router; if (dest_brd->brd_nasid == nasid_b) router_b = router; } } } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER))); } if (router_a == NULL) { printk("node_distance: router_a NULL\n"); return -1; } if (router_b == NULL) { printk("node_distance: router_b NULL\n"); return -1; } if (nasid_a == nasid_b) return 0; if (router_a == router_b) return 1; router_distance = 100; router_recurse(router_a, router_b, 2); return router_distance; } static void __init init_topology_matrix(void) { nasid_t nasid, nasid2; cnodeid_t row, col; for 
(row = 0; row < MAX_COMPACT_NODES; row++) for (col = 0; col < MAX_COMPACT_NODES; col++) __node_distances[row][col] = -1; for_each_online_node(row) { nasid = COMPACT_TO_NASID_NODEID(row); for_each_online_node(col) { nasid2 = COMPACT_TO_NASID_NODEID(col); __node_distances[row][col] = compute_node_distance(nasid, nasid2); } } } static void __init dump_topology(void) { nasid_t nasid; cnodeid_t cnode; lboard_t *brd, *dest_brd; int port; int router_num = 0; klrou_t *router; cnodeid_t row, col; printk("************** Topology ********************\n"); printk(" "); for_each_online_node(col) printk("%02d ", col); printk("\n"); for_each_online_node(row) { printk("%02d ", row); for_each_online_node(col) printk("%2d ", node_distance(row, col)); printk("\n"); } for_each_online_node(cnode) { nasid = COMPACT_TO_NASID_NODEID(cnode); if (nasid == -1) continue; brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_ROUTER); if (!brd) continue; do { if (brd->brd_flags & DUPLICATE_BOARD) continue; printk("Router %d:", router_num); router_num++; router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); for (port = 1; port <= MAX_ROUTER_PORTS; port++) { if (router->rou_port[port].port_nasid == INVALID_NASID) continue; dest_brd = (lboard_t *)NODE_OFFSET_TO_K0( router->rou_port[port].port_nasid, router->rou_port[port].port_offset); if (dest_brd->brd_type == KLTYPE_IP27) printk(" %d", dest_brd->brd_nasid); if (dest_brd->brd_type == KLTYPE_ROUTER) printk(" r"); } printk("\n"); } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) ); } } static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot) { nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT); } static pfn_t __init slot_psize_compute(cnodeid_t node, int slot) { nasid_t nasid; lboard_t *brd; klmembnk_t *banks; unsigned long size; nasid = COMPACT_TO_NASID_NODEID(node); /* Find the node board */ brd = find_lboard((lboard_t 
*)KL_CONFIG_INFO(nasid), KLTYPE_IP27); if (!brd) return 0; /* Get the memory bank structure */ banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK); if (!banks) return 0; /* Size in _Megabytes_ */ size = (unsigned long)banks->membnk_bnksz[slot/4]; /* hack for 128 dimm banks */ if (size <= 128) { if (slot % 4 == 0) { size <<= 20; /* size in bytes */ return(size >> PAGE_SHIFT); } else return 0; } else { size /= 4; size <<= 20; return size >> PAGE_SHIFT; } } static void __init mlreset(void) { int i; master_nasid = get_nasid(); fine_mode = is_fine_dirmode(); /* * Probe for all CPUs - this creates the cpumask and sets up the * mapping tables. We need to do this as early as possible. */ #ifdef CONFIG_SMP cpu_node_probe(); #endif init_topology_matrix(); dump_topology(); gen_region_mask(&region_mask); setup_replication_mask(); /* * Set all nodes' calias sizes to 8k */ for_each_online_node(i) { nasid_t nasid; nasid = COMPACT_TO_NASID_NODEID(i); /* * Always have node 0 in the region mask, otherwise * CALIAS accesses get exceptions since the hub * thinks it is a node 0 address. */ REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1)); #ifdef CONFIG_REPLICATE_EXHANDLERS REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K); #else REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0); #endif #ifdef LATER /* * Set up all hubs to have a big window pointing at * widget 0. Memory mode, widget 0, offset 0 */ REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN), ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) | (0 << IIO_ITTE_WIDGET_SHIFT))); #endif } } static void __init szmem(void) { pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ int slot; cnodeid_t node; num_physpages = 0; for_each_online_node(node) { nodebytes = 0; for (slot = 0; slot < MAX_MEM_SLOTS; slot++) { slot_psize = slot_psize_compute(node, slot); if (slot == 0) slot0sz = slot_psize; /* * We need to refine the hack when we have replicated * kernel text. 
*/ nodebytes += (1LL << SLOT_SHIFT); if (!slot_psize) continue; if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) > (slot0sz << PAGE_SHIFT)) { printk("Ignoring slot %d onwards on node %d\n", slot, node); slot = MAX_MEM_SLOTS; continue; } num_physpages += slot_psize; memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)), PFN_PHYS(slot_psize), node); } } } static void __init node_mem_init(cnodeid_t node) { pfn_t slot_firstpfn = slot_getbasepfn(node, 0); pfn_t slot_freepfn = node_getfirstfree(node); unsigned long bootmap_size; pfn_t start_pfn, end_pfn; get_pfn_range_for_nid(node, &start_pfn, &end_pfn); /* * Allocate the node data structures on the node first. */ __node_data[node] = __va(slot_freepfn << PAGE_SHIFT); NODE_DATA(node)->bdata = &bootmem_node_data[node]; NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; cpus_clear(hub_data(node)->h_cpus); slot_freepfn += PFN_UP(sizeof(struct pglist_data) + sizeof(struct hub_data)); bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn, start_pfn, end_pfn); free_bootmem_with_active_regions(node, end_pfn); reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT, ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size, BOOTMEM_DEFAULT); sparse_memory_present_with_active_regions(node); } /* * A node with nothing. We use it to avoid any special casing in * cpumask_of_node */ static struct node_data null_node = { .hub = { .h_cpus = CPU_MASK_NONE } }; /* * Currently, the intranode memory hole support assumes that each slot * contains at least 32 MBytes of memory. We assume all bootmem data * fits on the first slot. */ void __init prom_meminit(void) { cnodeid_t node; mlreset(); szmem(); for (node = 0; node < MAX_COMPACT_NODES; node++) { if (node_online(node)) { node_mem_init(node); continue; } __node_data[node] = &null_node; } } void __init prom_free_prom_memory(void) { /* We got nothing to free here ... 
*/ } extern unsigned long setup_zero_pages(void); void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES] = {0, }; unsigned node; pagetable_init(); for_each_online_node(node) { pfn_t start_pfn, end_pfn; get_pfn_range_for_nid(node, &start_pfn, &end_pfn); if (end_pfn > max_low_pfn) max_low_pfn = end_pfn; } zones_size[ZONE_NORMAL] = max_low_pfn; free_area_init_nodes(zones_size); } void __init mem_init(void) { unsigned long codesize, datasize, initsize, tmp; unsigned node; high_memory = (void *) __va(num_physpages << PAGE_SHIFT); for_each_online_node(node) { /* * This will free up the bootmem, ie, slot 0 memory. */ totalram_pages += free_all_bootmem_node(NODE_DATA(node)); } totalram_pages -= setup_zero_pages(); /* This comes from node 0 */ codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; tmp = nr_free_pages(); printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n", tmp << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, (num_physpages - tmp) << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, totalhigh_pages << (PAGE_SHIFT-10)); }
gpl-2.0
InfinitiveOS-Devices/android_kernel_lge_hammerhead
drivers/net/ethernet/cisco/enic/enic_main.c
4808
64625
/* * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/ethtool.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> #include <linux/rtnetlink.h> #include <linux/prefetch.h> #include <net/ip6_checksum.h> #include "cq_enet_desc.h" #include "vnic_dev.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "vnic_vic.h" #include "enic_res.h" #include "enic.h" #include "enic_dev.h" #include "enic_pp.h" #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) #define MAX_TSO (1 << 16) #define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ /* Supported devices */ 
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = { { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) }, { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, { 0, } /* end of table */ }; MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, enic_id_table); struct enic_stat { char name[ETH_GSTRING_LEN]; unsigned int offset; }; #define ENIC_TX_STAT(stat) \ { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 } #define ENIC_RX_STAT(stat) \ { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 } static const struct enic_stat enic_tx_stats[] = { ENIC_TX_STAT(tx_frames_ok), ENIC_TX_STAT(tx_unicast_frames_ok), ENIC_TX_STAT(tx_multicast_frames_ok), ENIC_TX_STAT(tx_broadcast_frames_ok), ENIC_TX_STAT(tx_bytes_ok), ENIC_TX_STAT(tx_unicast_bytes_ok), ENIC_TX_STAT(tx_multicast_bytes_ok), ENIC_TX_STAT(tx_broadcast_bytes_ok), ENIC_TX_STAT(tx_drops), ENIC_TX_STAT(tx_errors), ENIC_TX_STAT(tx_tso), }; static const struct enic_stat enic_rx_stats[] = { ENIC_RX_STAT(rx_frames_ok), ENIC_RX_STAT(rx_frames_total), ENIC_RX_STAT(rx_unicast_frames_ok), ENIC_RX_STAT(rx_multicast_frames_ok), ENIC_RX_STAT(rx_broadcast_frames_ok), ENIC_RX_STAT(rx_bytes_ok), ENIC_RX_STAT(rx_unicast_bytes_ok), ENIC_RX_STAT(rx_multicast_bytes_ok), ENIC_RX_STAT(rx_broadcast_bytes_ok), ENIC_RX_STAT(rx_drop), ENIC_RX_STAT(rx_no_bufs), ENIC_RX_STAT(rx_errors), ENIC_RX_STAT(rx_rss), ENIC_RX_STAT(rx_crc_errors), ENIC_RX_STAT(rx_frames_64), ENIC_RX_STAT(rx_frames_127), ENIC_RX_STAT(rx_frames_255), ENIC_RX_STAT(rx_frames_511), ENIC_RX_STAT(rx_frames_1023), ENIC_RX_STAT(rx_frames_1518), ENIC_RX_STAT(rx_frames_to_max), }; static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); int enic_is_dynamic(struct enic *enic) { return 
enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; } int enic_sriov_enabled(struct enic *enic) { return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; } static int enic_is_sriov_vf(struct enic *enic) { return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; } int enic_is_valid_vf(struct enic *enic, int vf) { #ifdef CONFIG_PCI_IOV return vf >= 0 && vf < enic->num_vfs; #else return 0; #endif } static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) { return rq; } static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) { return enic->rq_count + wq; } static inline unsigned int enic_legacy_io_intr(void) { return 0; } static inline unsigned int enic_legacy_err_intr(void) { return 1; } static inline unsigned int enic_legacy_notify_intr(void) { return 2; } static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq) { return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; } static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq) { return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; } static inline unsigned int enic_msix_err_intr(struct enic *enic) { return enic->rq_count + enic->wq_count; } static inline unsigned int enic_msix_notify_intr(struct enic *enic) { return enic->rq_count + enic->wq_count + 1; } static int enic_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct enic *enic = netdev_priv(netdev); ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; ecmd->transceiver = XCVR_EXTERNAL; if (netif_carrier_ok(netdev)) { ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); ecmd->duplex = DUPLEX_FULL; } else { ethtool_cmd_speed_set(ecmd, -1); ecmd->duplex = -1; } ecmd->autoneg = AUTONEG_DISABLE; return 0; } static void enic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct enic *enic = netdev_priv(netdev); 
struct vnic_devcmd_fw_info *fw_info; enic_dev_fw_info(enic, &fw_info); strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, fw_info->fw_version, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(enic->pdev), sizeof(drvinfo->bus_info)); } static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { unsigned int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < enic_n_tx_stats; i++) { memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } for (i = 0; i < enic_n_rx_stats; i++) { memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } break; } } static int enic_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_STATS: return enic_n_tx_stats + enic_n_rx_stats; default: return -EOPNOTSUPP; } } static void enic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct enic *enic = netdev_priv(netdev); struct vnic_stats *vstats; unsigned int i; enic_dev_stats_dump(enic, &vstats); for (i = 0; i < enic_n_tx_stats; i++) *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; for (i = 0; i < enic_n_rx_stats; i++) *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; } static u32 enic_get_msglevel(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); return enic->msg_enable; } static void enic_set_msglevel(struct net_device *netdev, u32 value) { struct enic *enic = netdev_priv(netdev); enic->msg_enable = value; } static int enic_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) { struct enic *enic = netdev_priv(netdev); ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs; ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs; return 0; } static int enic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) { struct enic *enic = netdev_priv(netdev); u32 
tx_coalesce_usecs; u32 rx_coalesce_usecs; unsigned int i, intr; tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs, vnic_dev_get_intr_coal_timer_max(enic->vdev)); rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs, vnic_dev_get_intr_coal_timer_max(enic->vdev)); switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: if (tx_coalesce_usecs != rx_coalesce_usecs) return -EINVAL; intr = enic_legacy_io_intr(); vnic_intr_coalescing_timer_set(&enic->intr[intr], tx_coalesce_usecs); break; case VNIC_DEV_INTR_MODE_MSI: if (tx_coalesce_usecs != rx_coalesce_usecs) return -EINVAL; vnic_intr_coalescing_timer_set(&enic->intr[0], tx_coalesce_usecs); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); vnic_intr_coalescing_timer_set(&enic->intr[intr], tx_coalesce_usecs); } for (i = 0; i < enic->rq_count; i++) { intr = enic_msix_rq_intr(enic, i); vnic_intr_coalescing_timer_set(&enic->intr[intr], rx_coalesce_usecs); } break; default: break; } enic->tx_coalesce_usecs = tx_coalesce_usecs; enic->rx_coalesce_usecs = rx_coalesce_usecs; return 0; } static const struct ethtool_ops enic_ethtool_ops = { .get_settings = enic_get_settings, .get_drvinfo = enic_get_drvinfo, .get_msglevel = enic_get_msglevel, .set_msglevel = enic_set_msglevel, .get_link = ethtool_op_get_link, .get_strings = enic_get_strings, .get_sset_count = enic_get_sset_count, .get_ethtool_stats = enic_get_ethtool_stats, .get_coalesce = enic_get_coalesce, .set_coalesce = enic_set_coalesce, }; static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) { struct enic *enic = vnic_dev_priv(wq->vdev); if (buf->sop) pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); else pci_unmap_page(enic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE); if (buf->os_buf) dev_kfree_skb_any(buf->os_buf); } static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) { 
enic_free_wq_buf(wq, buf); } static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type, u16 q_number, u16 completed_index, void *opaque) { struct enic *enic = vnic_dev_priv(vdev); spin_lock(&enic->wq_lock[q_number]); vnic_wq_service(&enic->wq[q_number], cq_desc, completed_index, enic_wq_free_buf, opaque); if (netif_queue_stopped(enic->netdev) && vnic_wq_desc_avail(&enic->wq[q_number]) >= (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) netif_wake_queue(enic->netdev); spin_unlock(&enic->wq_lock[q_number]); return 0; } static void enic_log_q_error(struct enic *enic) { unsigned int i; u32 error_status; for (i = 0; i < enic->wq_count; i++) { error_status = vnic_wq_error_status(&enic->wq[i]); if (error_status) netdev_err(enic->netdev, "WQ[%d] error_status %d\n", i, error_status); } for (i = 0; i < enic->rq_count; i++) { error_status = vnic_rq_error_status(&enic->rq[i]); if (error_status) netdev_err(enic->netdev, "RQ[%d] error_status %d\n", i, error_status); } } static void enic_msglvl_check(struct enic *enic) { u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); if (msg_enable != enic->msg_enable) { netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", enic->msg_enable, msg_enable); enic->msg_enable = msg_enable; } } static void enic_mtu_check(struct enic *enic) { u32 mtu = vnic_dev_mtu(enic->vdev); struct net_device *netdev = enic->netdev; if (mtu && mtu != enic->port_mtu) { enic->port_mtu = mtu; if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, mtu)); if (mtu != netdev->mtu) schedule_work(&enic->change_mtu_work); } else { if (mtu < netdev->mtu) netdev_warn(netdev, "interface MTU (%d) set higher " "than switch port MTU (%d)\n", netdev->mtu, mtu); } } } static void enic_link_check(struct enic *enic) { int link_status = vnic_dev_link_status(enic->vdev); int carrier_ok = netif_carrier_ok(enic->netdev); if (link_status && !carrier_ok) { netdev_info(enic->netdev, "Link UP\n"); 
netif_carrier_on(enic->netdev); } else if (!link_status && carrier_ok) { netdev_info(enic->netdev, "Link DOWN\n"); netif_carrier_off(enic->netdev); } } static void enic_notify_check(struct enic *enic) { enic_msglvl_check(enic); enic_mtu_check(enic); enic_link_check(enic); } #define ENIC_TEST_INTR(pba, i) (pba & (1 << i)) static irqreturn_t enic_isr_legacy(int irq, void *data) { struct net_device *netdev = data; struct enic *enic = netdev_priv(netdev); unsigned int io_intr = enic_legacy_io_intr(); unsigned int err_intr = enic_legacy_err_intr(); unsigned int notify_intr = enic_legacy_notify_intr(); u32 pba; vnic_intr_mask(&enic->intr[io_intr]); pba = vnic_intr_legacy_pba(enic->legacy_pba); if (!pba) { vnic_intr_unmask(&enic->intr[io_intr]); return IRQ_NONE; /* not our interrupt */ } if (ENIC_TEST_INTR(pba, notify_intr)) { vnic_intr_return_all_credits(&enic->intr[notify_intr]); enic_notify_check(enic); } if (ENIC_TEST_INTR(pba, err_intr)) { vnic_intr_return_all_credits(&enic->intr[err_intr]); enic_log_q_error(enic); /* schedule recovery from WQ/RQ error */ schedule_work(&enic->reset); return IRQ_HANDLED; } if (ENIC_TEST_INTR(pba, io_intr)) { if (napi_schedule_prep(&enic->napi[0])) __napi_schedule(&enic->napi[0]); } else { vnic_intr_unmask(&enic->intr[io_intr]); } return IRQ_HANDLED; } static irqreturn_t enic_isr_msi(int irq, void *data) { struct enic *enic = data; /* With MSI, there is no sharing of interrupts, so this is * our interrupt and there is no need to ack it. The device * is not providing per-vector masking, so the OS will not * write to PCI config space to mask/unmask the interrupt. * We're using mask_on_assertion for MSI, so the device * automatically masks the interrupt when the interrupt is * generated. Later, when exiting polling, the interrupt * will be unmasked (see enic_poll). 
* * Also, the device uses the same PCIe Traffic Class (TC) * for Memory Write data and MSI, so there are no ordering * issues; the MSI will always arrive at the Root Complex * _after_ corresponding Memory Writes (i.e. descriptor * writes). */ napi_schedule(&enic->napi[0]); return IRQ_HANDLED; } static irqreturn_t enic_isr_msix_rq(int irq, void *data) { struct napi_struct *napi = data; /* schedule NAPI polling for RQ cleanup */ napi_schedule(napi); return IRQ_HANDLED; } static irqreturn_t enic_isr_msix_wq(int irq, void *data) { struct enic *enic = data; unsigned int cq = enic_cq_wq(enic, 0); unsigned int intr = enic_msix_wq_intr(enic, 0); unsigned int wq_work_to_do = -1; /* no limit */ unsigned int wq_work_done; wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, enic_wq_service, NULL); vnic_intr_return_credits(&enic->intr[intr], wq_work_done, 1 /* unmask intr */, 1 /* reset intr timer */); return IRQ_HANDLED; } static irqreturn_t enic_isr_msix_err(int irq, void *data) { struct enic *enic = data; unsigned int intr = enic_msix_err_intr(enic); vnic_intr_return_all_credits(&enic->intr[intr]); enic_log_q_error(enic); /* schedule recovery from WQ/RQ error */ schedule_work(&enic->reset); return IRQ_HANDLED; } static irqreturn_t enic_isr_msix_notify(int irq, void *data) { struct enic *enic = data; unsigned int intr = enic_msix_notify_intr(enic); vnic_intr_return_all_credits(&enic->intr[intr]); enic_notify_check(enic); return IRQ_HANDLED; } static inline void enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, unsigned int len_left, int loopback) { const skb_frag_t *frag; /* Queue additional data fragments */ for (frag = skb_shinfo(skb)->frags; len_left; frag++) { len_left -= skb_frag_size(frag); enic_queue_wq_desc_cont(wq, skb, skb_frag_dma_map(&enic->pdev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE), skb_frag_size(frag), (len_left == 0), /* EOP? 
*/ loopback); } } static inline void enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, int vlan_tag_insert, unsigned int vlan_tag, int loopback) { unsigned int head_len = skb_headlen(skb); unsigned int len_left = skb->len - head_len; int eop = (len_left == 0); /* Queue the main skb fragment. The fragments are no larger * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor * per fragment is queued. */ enic_queue_wq_desc(wq, skb, pci_map_single(enic->pdev, skb->data, head_len, PCI_DMA_TODEVICE), head_len, vlan_tag_insert, vlan_tag, eop, loopback); if (!eop) enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); } static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, int vlan_tag_insert, unsigned int vlan_tag, int loopback) { unsigned int head_len = skb_headlen(skb); unsigned int len_left = skb->len - head_len; unsigned int hdr_len = skb_checksum_start_offset(skb); unsigned int csum_offset = hdr_len + skb->csum_offset; int eop = (len_left == 0); /* Queue the main skb fragment. The fragments are no larger * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor * per fragment is queued. 
*/ enic_queue_wq_desc_csum_l4(wq, skb, pci_map_single(enic->pdev, skb->data, head_len, PCI_DMA_TODEVICE), head_len, csum_offset, hdr_len, vlan_tag_insert, vlan_tag, eop, loopback); if (!eop) enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); } static inline void enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, int vlan_tag_insert, unsigned int vlan_tag, int loopback) { unsigned int frag_len_left = skb_headlen(skb); unsigned int len_left = skb->len - frag_len_left; unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int eop = (len_left == 0); unsigned int len; dma_addr_t dma_addr; unsigned int offset = 0; skb_frag_t *frag; /* Preload TCP csum field with IP pseudo hdr calculated * with IP length set to zero. HW will later add in length * to each TCP segment resulting from the TSO. */ if (skb->protocol == cpu_to_be16(ETH_P_IP)) { ip_hdr(skb)->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); } /* Queue WQ_ENET_MAX_DESC_LEN length descriptors * for the main skb fragment */ while (frag_len_left) { len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); dma_addr = pci_map_single(enic->pdev, skb->data + offset, len, PCI_DMA_TODEVICE); enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len, vlan_tag_insert, vlan_tag, eop && (len == frag_len_left), loopback); frag_len_left -= len; offset += len; } if (eop) return; /* Queue WQ_ENET_MAX_DESC_LEN length descriptors * for additional data fragments */ for (frag = skb_shinfo(skb)->frags; len_left; frag++) { len_left -= skb_frag_size(frag); frag_len_left = skb_frag_size(frag); offset = 0; while (frag_len_left) { len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 
offset, len, DMA_TO_DEVICE); enic_queue_wq_desc_cont(wq, skb, dma_addr, len, (len_left == 0) && (len == frag_len_left), /* EOP? */ loopback); frag_len_left -= len; offset += len; } } } static inline void enic_queue_wq_skb(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb) { unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int vlan_tag = 0; int vlan_tag_insert = 0; int loopback = 0; if (vlan_tx_tag_present(skb)) { /* VLAN tag from trunking driver */ vlan_tag_insert = 1; vlan_tag = vlan_tx_tag_get(skb); } else if (enic->loop_enable) { vlan_tag = enic->loop_tag; loopback = 1; } if (mss) enic_queue_wq_skb_tso(enic, wq, skb, mss, vlan_tag_insert, vlan_tag, loopback); else if (skb->ip_summed == CHECKSUM_PARTIAL) enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); else enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, vlan_tag, loopback); } /* netif_tx_lock held, process context with BHs disabled, or BH */ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); struct vnic_wq *wq = &enic->wq[0]; unsigned long flags; if (skb->len <= 0) { dev_kfree_skb(skb); return NETDEV_TX_OK; } /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, * which is very likely. In the off chance it's going to take * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb. */ if (skb_shinfo(skb)->gso_size == 0 && skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && skb_linearize(skb)) { dev_kfree_skb(skb); return NETDEV_TX_OK; } spin_lock_irqsave(&enic->wq_lock[0], flags); if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { netif_stop_queue(netdev); /* This is a hard error, log it */ netdev_err(netdev, "BUG! 
Tx ring full when queue awake!\n"); spin_unlock_irqrestore(&enic->wq_lock[0], flags); return NETDEV_TX_BUSY; } enic_queue_wq_skb(enic, wq, skb); if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) netif_stop_queue(netdev); spin_unlock_irqrestore(&enic->wq_lock[0], flags); return NETDEV_TX_OK; } /* dev_base_lock rwlock held, nominally process context */ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *net_stats) { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; enic_dev_stats_dump(enic, &stats); net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; net_stats->tx_errors = stats->tx.tx_errors; net_stats->tx_dropped = stats->tx.tx_drops; net_stats->rx_packets = stats->rx.rx_frames_ok; net_stats->rx_bytes = stats->rx.rx_bytes_ok; net_stats->rx_errors = stats->rx.rx_errors; net_stats->multicast = stats->rx.rx_multicast_frames_ok; net_stats->rx_over_errors = enic->rq_truncated_pkts; net_stats->rx_crc_errors = enic->rq_bad_fcs; net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; return net_stats; } void enic_reset_addr_lists(struct enic *enic) { enic->mc_count = 0; enic->uc_count = 0; enic->flags = 0; } static int enic_set_mac_addr(struct net_device *netdev, char *addr) { struct enic *enic = netdev_priv(netdev); if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr)) return -EADDRNOTAVAIL; } else { if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; } memcpy(netdev->dev_addr, addr, netdev->addr_len); netdev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) { struct enic *enic = netdev_priv(netdev); struct sockaddr *saddr = p; char *addr = saddr->sa_data; int err; if (netif_running(enic->netdev)) { err = enic_dev_del_station_addr(enic); if (err) return err; } err = enic_set_mac_addr(netdev, addr); if 
(err) return err; if (netif_running(enic->netdev)) { err = enic_dev_add_station_addr(enic); if (err) return err; } return err; } static int enic_set_mac_address(struct net_device *netdev, void *p) { struct sockaddr *saddr = p; char *addr = saddr->sa_data; struct enic *enic = netdev_priv(netdev); int err; err = enic_dev_del_station_addr(enic); if (err) return err; err = enic_set_mac_addr(netdev, addr); if (err) return err; return enic_dev_add_station_addr(enic); } static void enic_update_multicast_addr_list(struct enic *enic) { struct net_device *netdev = enic->netdev; struct netdev_hw_addr *ha; unsigned int mc_count = netdev_mc_count(netdev); u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; unsigned int i, j; if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) { netdev_warn(netdev, "Registering only %d out of %d " "multicast addresses\n", ENIC_MULTICAST_PERFECT_FILTERS, mc_count); mc_count = ENIC_MULTICAST_PERFECT_FILTERS; } /* Is there an easier way? Trying to minimize to * calls to add/del multicast addrs. We keep the * addrs from the last call in enic->mc_addr and * look for changes to add/del. 
*/ i = 0; netdev_for_each_mc_addr(ha, netdev) { if (i == mc_count) break; memcpy(mc_addr[i++], ha->addr, ETH_ALEN); } for (i = 0; i < enic->mc_count; i++) { for (j = 0; j < mc_count; j++) if (compare_ether_addr(enic->mc_addr[i], mc_addr[j]) == 0) break; if (j == mc_count) enic_dev_del_addr(enic, enic->mc_addr[i]); } for (i = 0; i < mc_count; i++) { for (j = 0; j < enic->mc_count; j++) if (compare_ether_addr(mc_addr[i], enic->mc_addr[j]) == 0) break; if (j == enic->mc_count) enic_dev_add_addr(enic, mc_addr[i]); } /* Save the list to compare against next time */ for (i = 0; i < mc_count; i++) memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN); enic->mc_count = mc_count; } static void enic_update_unicast_addr_list(struct enic *enic) { struct net_device *netdev = enic->netdev; struct netdev_hw_addr *ha; unsigned int uc_count = netdev_uc_count(netdev); u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN]; unsigned int i, j; if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) { netdev_warn(netdev, "Registering only %d out of %d " "unicast addresses\n", ENIC_UNICAST_PERFECT_FILTERS, uc_count); uc_count = ENIC_UNICAST_PERFECT_FILTERS; } /* Is there an easier way? Trying to minimize to * calls to add/del unicast addrs. We keep the * addrs from the last call in enic->uc_addr and * look for changes to add/del. 
*/ i = 0; netdev_for_each_uc_addr(ha, netdev) { if (i == uc_count) break; memcpy(uc_addr[i++], ha->addr, ETH_ALEN); } for (i = 0; i < enic->uc_count; i++) { for (j = 0; j < uc_count; j++) if (compare_ether_addr(enic->uc_addr[i], uc_addr[j]) == 0) break; if (j == uc_count) enic_dev_del_addr(enic, enic->uc_addr[i]); } for (i = 0; i < uc_count; i++) { for (j = 0; j < enic->uc_count; j++) if (compare_ether_addr(uc_addr[i], enic->uc_addr[j]) == 0) break; if (j == enic->uc_count) enic_dev_add_addr(enic, uc_addr[i]); } /* Save the list to compare against next time */ for (i = 0; i < uc_count; i++) memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN); enic->uc_count = uc_count; } /* netif_tx_lock held, BHs disabled */ static void enic_set_rx_mode(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); int directed = 1; int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; int promisc = (netdev->flags & IFF_PROMISC) || netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS; int allmulti = (netdev->flags & IFF_ALLMULTI) || netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS; unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0) | (promisc ? 
IFF_PROMISC : 0); if (enic->flags != flags) { enic->flags = flags; enic_dev_packet_filter(enic, directed, multicast, broadcast, promisc, allmulti); } if (!promisc) { enic_update_unicast_addr_list(enic); if (!allmulti) enic_update_multicast_addr_list(enic); } } /* netif_tx_lock held, BHs disabled */ static void enic_tx_timeout(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); schedule_work(&enic->reset); } static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct enic *enic = netdev_priv(netdev); struct enic_port_profile *pp; int err; ENIC_PP_BY_INDEX(enic, vf, pp, &err); if (err) return err; if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) { if (vf == PORT_SELF_VF) { memcpy(pp->vf_mac, mac, ETH_ALEN); return 0; } else { /* * For sriov vf's set the mac in hw */ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_set_mac_addr, mac); return enic_dev_status_to_errno(err); } } else return -EINVAL; } static int enic_set_vf_port(struct net_device *netdev, int vf, struct nlattr *port[]) { struct enic *enic = netdev_priv(netdev); struct enic_port_profile prev_pp; struct enic_port_profile *pp; int err = 0, restore_pp = 1; ENIC_PP_BY_INDEX(enic, vf, pp, &err); if (err) return err; if (!port[IFLA_PORT_REQUEST]) return -EOPNOTSUPP; memcpy(&prev_pp, pp, sizeof(*enic->pp)); memset(pp, 0, sizeof(*enic->pp)); pp->set |= ENIC_SET_REQUEST; pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); if (port[IFLA_PORT_PROFILE]) { pp->set |= ENIC_SET_NAME; memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), PORT_PROFILE_MAX); } if (port[IFLA_PORT_INSTANCE_UUID]) { pp->set |= ENIC_SET_INSTANCE; memcpy(pp->instance_uuid, nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); } if (port[IFLA_PORT_HOST_UUID]) { pp->set |= ENIC_SET_HOST; memcpy(pp->host_uuid, nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); } if (vf == PORT_SELF_VF) { /* Special case handling: mac came from IFLA_VF_MAC */ if (!is_zero_ether_addr(prev_pp.vf_mac)) 
memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN); if (is_zero_ether_addr(netdev->dev_addr)) eth_hw_addr_random(netdev); } else { /* SR-IOV VF: get mac from adapter */ ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_get_mac_addr, pp->mac_addr); if (err) { netdev_err(netdev, "Error getting mac for vf %d\n", vf); memcpy(pp, &prev_pp, sizeof(*pp)); return enic_dev_status_to_errno(err); } } err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); if (err) { if (restore_pp) { /* Things are still the way they were: Implicit * DISASSOCIATE failed */ memcpy(pp, &prev_pp, sizeof(*pp)); } else { memset(pp, 0, sizeof(*pp)); if (vf == PORT_SELF_VF) memset(netdev->dev_addr, 0, ETH_ALEN); } } else { /* Set flag to indicate that the port assoc/disassoc * request has been sent out to fw */ pp->set |= ENIC_PORT_REQUEST_APPLIED; /* If DISASSOCIATE, clean up all assigned/saved macaddresses */ if (pp->request == PORT_REQUEST_DISASSOCIATE) { memset(pp->mac_addr, 0, ETH_ALEN); if (vf == PORT_SELF_VF) memset(netdev->dev_addr, 0, ETH_ALEN); } } if (vf == PORT_SELF_VF) memset(pp->vf_mac, 0, ETH_ALEN); return err; } static int enic_get_vf_port(struct net_device *netdev, int vf, struct sk_buff *skb) { struct enic *enic = netdev_priv(netdev); u16 response = PORT_PROFILE_RESPONSE_SUCCESS; struct enic_port_profile *pp; int err; ENIC_PP_BY_INDEX(enic, vf, pp, &err); if (err) return err; if (!(pp->set & ENIC_PORT_REQUEST_APPLIED)) return -ENODATA; err = enic_process_get_pp_request(enic, vf, pp->request, &response); if (err) return err; NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request); NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); if (pp->set & ENIC_SET_NAME) NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name); if (pp->set & ENIC_SET_INSTANCE) NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, pp->instance_uuid); if (pp->set & ENIC_SET_HOST) NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid); return 0; nla_put_failure: return -EMSGSIZE; } static void 
enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) { struct enic *enic = vnic_dev_priv(rq->vdev); if (!buf->os_buf) return; pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(buf->os_buf); } static int enic_rq_alloc_buf(struct vnic_rq *rq) { struct enic *enic = vnic_dev_priv(rq->vdev); struct net_device *netdev = enic->netdev; struct sk_buff *skb; unsigned int len = netdev->mtu + VLAN_ETH_HLEN; unsigned int os_buf_index = 0; dma_addr_t dma_addr; skb = netdev_alloc_skb_ip_align(netdev, len); if (!skb) return -ENOMEM; dma_addr = pci_map_single(enic->pdev, skb->data, len, PCI_DMA_FROMDEVICE); enic_queue_rq_desc(rq, skb, os_buf_index, dma_addr, len); return 0; } static void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque) { struct enic *enic = vnic_dev_priv(rq->vdev); struct net_device *netdev = enic->netdev; struct sk_buff *skb; u8 type, color, eop, sop, ingress_port, vlan_stripped; u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; u8 packet_error; u16 q_number, completed_index, bytes_written, vlan_tci, checksum; u32 rss_hash; if (skipped) return; skb = buf->os_buf; prefetch(skb->data - NET_IP_ALIGN); pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, PCI_DMA_FROMDEVICE); cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color, &q_number, &completed_index, &ingress_port, &fcoe, &eop, &sop, &rss_type, &csum_not_calc, &rss_hash, &bytes_written, &packet_error, &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, &fcs_ok); if (packet_error) { if (!fcs_ok) { if (bytes_written > 0) enic->rq_bad_fcs++; else if (bytes_written == 0) enic->rq_truncated_pkts++; } dev_kfree_skb_any(skb); return; } if (eop && 
bytes_written > 0) { /* Good receive */ skb_put(skb, bytes_written); skb->protocol = eth_type_trans(skb, netdev); if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { skb->csum = htons(checksum); skb->ip_summed = CHECKSUM_COMPLETE; } skb->dev = netdev; if (vlan_stripped) __vlan_hwaccel_put_tag(skb, vlan_tci); if (netdev->features & NETIF_F_GRO) napi_gro_receive(&enic->napi[q_number], skb); else netif_receive_skb(skb); } else { /* Buffer overflow */ dev_kfree_skb_any(skb); } } static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type, u16 q_number, u16 completed_index, void *opaque) { struct enic *enic = vnic_dev_priv(vdev); vnic_rq_service(&enic->rq[q_number], cq_desc, completed_index, VNIC_RQ_RETURN_DESC, enic_rq_indicate_buf, opaque); return 0; } static int enic_poll(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; struct enic *enic = netdev_priv(netdev); unsigned int cq_rq = enic_cq_rq(enic, 0); unsigned int cq_wq = enic_cq_wq(enic, 0); unsigned int intr = enic_legacy_io_intr(); unsigned int rq_work_to_do = budget; unsigned int wq_work_to_do = -1; /* no limit */ unsigned int work_done, rq_work_done, wq_work_done; int err; /* Service RQ (first) and WQ */ rq_work_done = vnic_cq_service(&enic->cq[cq_rq], rq_work_to_do, enic_rq_service, NULL); wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, enic_wq_service, NULL); /* Accumulate intr event credits for this polling * cycle. An intr event is the completion of a * a WQ or RQ packet. */ work_done = rq_work_done + wq_work_done; if (work_done > 0) vnic_intr_return_credits(&enic->intr[intr], work_done, 0 /* don't unmask intr */, 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. 
*/ if (err) rq_work_done = rq_work_to_do; if (rq_work_done < rq_work_to_do) { /* Some work done, but not enough to stay in polling, * exit polling */ napi_complete(napi); vnic_intr_unmask(&enic->intr[intr]); } return rq_work_done; } static int enic_poll_msix(struct napi_struct *napi, int budget) { struct net_device *netdev = napi->dev; struct enic *enic = netdev_priv(netdev); unsigned int rq = (napi - &enic->napi[0]); unsigned int cq = enic_cq_rq(enic, rq); unsigned int intr = enic_msix_rq_intr(enic, rq); unsigned int work_to_do = budget; unsigned int work_done; int err; /* Service RQ */ work_done = vnic_cq_service(&enic->cq[cq], work_to_do, enic_rq_service, NULL); /* Return intr event credits for this polling * cycle. An intr event is the completion of a * RQ packet. */ if (work_done > 0) vnic_intr_return_credits(&enic->intr[intr], work_done, 0 /* don't unmask intr */, 0 /* don't reset intr timer */); err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling mode * so we can try to fill the ring again. 
*/ if (err) work_done = work_to_do; if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, * exit polling */ napi_complete(napi); vnic_intr_unmask(&enic->intr[intr]); } return work_done; } static void enic_notify_timer(unsigned long data) { struct enic *enic = (struct enic *)data; enic_notify_check(enic); mod_timer(&enic->notify_timer, round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD)); } static void enic_free_intr(struct enic *enic) { struct net_device *netdev = enic->netdev; unsigned int i; switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: free_irq(enic->pdev->irq, netdev); break; case VNIC_DEV_INTR_MODE_MSI: free_irq(enic->pdev->irq, enic); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < ARRAY_SIZE(enic->msix); i++) if (enic->msix[i].requested) free_irq(enic->msix_entry[i].vector, enic->msix[i].devid); break; default: break; } } static int enic_request_intr(struct enic *enic) { struct net_device *netdev = enic->netdev; unsigned int i, intr; int err = 0; switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: err = request_irq(enic->pdev->irq, enic_isr_legacy, IRQF_SHARED, netdev->name, netdev); break; case VNIC_DEV_INTR_MODE_MSI: err = request_irq(enic->pdev->irq, enic_isr_msi, 0, netdev->name, enic); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < enic->rq_count; i++) { intr = enic_msix_rq_intr(enic, i); sprintf(enic->msix[intr].devname, "%.11s-rx-%d", netdev->name, i); enic->msix[intr].isr = enic_isr_msix_rq; enic->msix[intr].devid = &enic->napi[i]; } for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); sprintf(enic->msix[intr].devname, "%.11s-tx-%d", netdev->name, i); enic->msix[intr].isr = enic_isr_msix_wq; enic->msix[intr].devid = enic; } intr = enic_msix_err_intr(enic); sprintf(enic->msix[intr].devname, "%.11s-err", netdev->name); enic->msix[intr].isr = enic_isr_msix_err; enic->msix[intr].devid = enic; intr = enic_msix_notify_intr(enic); 
sprintf(enic->msix[intr].devname, "%.11s-notify", netdev->name); enic->msix[intr].isr = enic_isr_msix_notify; enic->msix[intr].devid = enic; for (i = 0; i < ARRAY_SIZE(enic->msix); i++) enic->msix[i].requested = 0; for (i = 0; i < enic->intr_count; i++) { err = request_irq(enic->msix_entry[i].vector, enic->msix[i].isr, 0, enic->msix[i].devname, enic->msix[i].devid); if (err) { enic_free_intr(enic); break; } enic->msix[i].requested = 1; } break; default: break; } return err; } static void enic_synchronize_irqs(struct enic *enic) { unsigned int i; switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: case VNIC_DEV_INTR_MODE_MSI: synchronize_irq(enic->pdev->irq); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < enic->intr_count; i++) synchronize_irq(enic->msix_entry[i].vector); break; default: break; } } static int enic_dev_notify_set(struct enic *enic) { int err; spin_lock(&enic->devcmd_lock); switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: err = vnic_dev_notify_set(enic->vdev, enic_legacy_notify_intr()); break; case VNIC_DEV_INTR_MODE_MSIX: err = vnic_dev_notify_set(enic->vdev, enic_msix_notify_intr(enic)); break; default: err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); break; } spin_unlock(&enic->devcmd_lock); return err; } static void enic_notify_timer_start(struct enic *enic) { switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_MSI: mod_timer(&enic->notify_timer, jiffies); break; default: /* Using intr for notification for INTx/MSI-X */ break; } } /* rtnl lock is held, process context */ static int enic_open(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); unsigned int i; int err; err = enic_request_intr(enic); if (err) { netdev_err(netdev, "Unable to request irq.\n"); return err; } err = enic_dev_notify_set(enic); if (err) { netdev_err(netdev, "Failed to alloc notify buffer, aborting.\n"); goto err_out_free_intr; } for (i = 0; i < enic->rq_count; i++) { 
vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[i]) == 0) { netdev_err(netdev, "Unable to alloc receive buffers\n"); err = -ENOMEM; goto err_out_notify_unset; } } for (i = 0; i < enic->wq_count; i++) vnic_wq_enable(&enic->wq[i]); for (i = 0; i < enic->rq_count; i++) vnic_rq_enable(&enic->rq[i]); if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_add_station_addr(enic); enic_set_rx_mode(netdev); netif_wake_queue(netdev); for (i = 0; i < enic->rq_count; i++) napi_enable(&enic->napi[i]); enic_dev_enable(enic); for (i = 0; i < enic->intr_count; i++) vnic_intr_unmask(&enic->intr[i]); enic_notify_timer_start(enic); return 0; err_out_notify_unset: enic_dev_notify_unset(enic); err_out_free_intr: enic_free_intr(enic); return err; } /* rtnl lock is held, process context */ static int enic_stop(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); unsigned int i; int err; for (i = 0; i < enic->intr_count; i++) { vnic_intr_mask(&enic->intr[i]); (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ } enic_synchronize_irqs(enic); del_timer_sync(&enic->notify_timer); enic_dev_disable(enic); for (i = 0; i < enic->rq_count; i++) napi_disable(&enic->napi[i]); netif_carrier_off(netdev); netif_tx_disable(netdev); if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) enic_dev_del_station_addr(enic); for (i = 0; i < enic->wq_count; i++) { err = vnic_wq_disable(&enic->wq[i]); if (err) return err; } for (i = 0; i < enic->rq_count; i++) { err = vnic_rq_disable(&enic->rq[i]); if (err) return err; } enic_dev_notify_unset(enic); enic_free_intr(enic); for (i = 0; i < enic->wq_count; i++) vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); for (i = 0; i < enic->rq_count; i++) vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); for (i = 0; i < enic->cq_count; i++) vnic_cq_clean(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++) vnic_intr_clean(&enic->intr[i]); return 0; } static int 
enic_change_mtu(struct net_device *netdev, int new_mtu) { struct enic *enic = netdev_priv(netdev); int running = netif_running(netdev); if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) return -EINVAL; if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; if (running) enic_stop(netdev); netdev->mtu = new_mtu; if (netdev->mtu > enic->port_mtu) netdev_warn(netdev, "interface MTU (%d) set higher than port MTU (%d)\n", netdev->mtu, enic->port_mtu); if (running) enic_open(netdev); return 0; } static void enic_change_mtu_work(struct work_struct *work) { struct enic *enic = container_of(work, struct enic, change_mtu_work); struct net_device *netdev = enic->netdev; int new_mtu = vnic_dev_mtu(enic->vdev); int err; unsigned int i; new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu)); rtnl_lock(); /* Stop RQ */ del_timer_sync(&enic->notify_timer); for (i = 0; i < enic->rq_count; i++) napi_disable(&enic->napi[i]); vnic_intr_mask(&enic->intr[0]); enic_synchronize_irqs(enic); err = vnic_rq_disable(&enic->rq[0]); if (err) { netdev_err(netdev, "Unable to disable RQ.\n"); return; } vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); vnic_cq_clean(&enic->cq[0]); vnic_intr_clean(&enic->intr[0]); /* Fill RQ with new_mtu-sized buffers */ netdev->mtu = new_mtu; vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[0]) == 0) { netdev_err(netdev, "Unable to alloc receive buffers.\n"); return; } /* Start RQ */ vnic_rq_enable(&enic->rq[0]); napi_enable(&enic->napi[0]); vnic_intr_unmask(&enic->intr[0]); enic_notify_timer_start(enic); rtnl_unlock(); netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); } #ifdef CONFIG_NET_POLL_CONTROLLER static void enic_poll_controller(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); struct vnic_dev *vdev = enic->vdev; unsigned int i, intr; switch (vnic_dev_get_intr_mode(vdev)) { case VNIC_DEV_INTR_MODE_MSIX: for (i = 
0; i < enic->rq_count; i++) { intr = enic_msix_rq_intr(enic, i); enic_isr_msix_rq(enic->msix_entry[intr].vector, &enic->napi[i]); } for (i = 0; i < enic->wq_count; i++) { intr = enic_msix_wq_intr(enic, i); enic_isr_msix_wq(enic->msix_entry[intr].vector, enic); } break; case VNIC_DEV_INTR_MODE_MSI: enic_isr_msi(enic->pdev->irq, enic); break; case VNIC_DEV_INTR_MODE_INTX: enic_isr_legacy(enic->pdev->irq, netdev); break; default: break; } } #endif static int enic_dev_wait(struct vnic_dev *vdev, int (*start)(struct vnic_dev *, int), int (*finished)(struct vnic_dev *, int *), int arg) { unsigned long time; int done; int err; BUG_ON(in_interrupt()); err = start(vdev, arg); if (err) return err; /* Wait for func to complete...2 seconds max */ time = jiffies + (HZ * 2); do { err = finished(vdev, &done); if (err) return err; if (done) return 0; schedule_timeout_uninterruptible(HZ / 10); } while (time_after(time, jiffies)); return -ETIMEDOUT; } static int enic_dev_open(struct enic *enic) { int err; err = enic_dev_wait(enic->vdev, vnic_dev_open, vnic_dev_open_done, 0); if (err) dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", err); return err; } static int enic_dev_hang_reset(struct enic *enic) { int err; err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, vnic_dev_hang_reset_done, 0); if (err) netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", err); return err; } static int enic_set_rsskey(struct enic *enic) { dma_addr_t rss_key_buf_pa; union vnic_rss_key *rss_key_buf_va = NULL; union vnic_rss_key rss_key = { .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}, .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}, .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}, }; int err; rss_key_buf_va = pci_alloc_consistent(enic->pdev, sizeof(union vnic_rss_key), &rss_key_buf_pa); if (!rss_key_buf_va) return -ENOMEM; memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key)); 
spin_lock(&enic->devcmd_lock); err = enic_set_rss_key(enic, rss_key_buf_pa, sizeof(union vnic_rss_key)); spin_unlock(&enic->devcmd_lock); pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), rss_key_buf_va, rss_key_buf_pa); return err; } static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) { dma_addr_t rss_cpu_buf_pa; union vnic_rss_cpu *rss_cpu_buf_va = NULL; unsigned int i; int err; rss_cpu_buf_va = pci_alloc_consistent(enic->pdev, sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa); if (!rss_cpu_buf_va) return -ENOMEM; for (i = 0; i < (1 << rss_hash_bits); i++) (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; spin_lock(&enic->devcmd_lock); err = enic_set_rss_cpu(enic, rss_cpu_buf_pa, sizeof(union vnic_rss_cpu)); spin_unlock(&enic->devcmd_lock); pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), rss_cpu_buf_va, rss_cpu_buf_pa); return err; } static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable) { const u8 tso_ipid_split_en = 0; const u8 ig_vlan_strip_en = 1; int err; /* Enable VLAN tag stripping. 
*/ spin_lock(&enic->devcmd_lock); err = enic_set_nic_cfg(enic, rss_default_cpu, rss_hash_type, rss_hash_bits, rss_base_cpu, rss_enable, tso_ipid_split_en, ig_vlan_strip_en); spin_unlock(&enic->devcmd_lock); return err; } static int enic_set_rss_nic_cfg(struct enic *enic) { struct device *dev = enic_get_dev(enic); const u8 rss_default_cpu = 0; const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | NIC_CFG_RSS_HASH_TYPE_IPV6 | NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; const u8 rss_hash_bits = 7; const u8 rss_base_cpu = 0; u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); if (rss_enable) { if (!enic_set_rsskey(enic)) { if (enic_set_rsscpu(enic, rss_hash_bits)) { rss_enable = 0; dev_warn(dev, "RSS disabled, " "Failed to set RSS cpu indirection table."); } } else { rss_enable = 0; dev_warn(dev, "RSS disabled, Failed to set RSS key.\n"); } } return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, rss_hash_bits, rss_base_cpu, rss_enable); } static void enic_reset(struct work_struct *work) { struct enic *enic = container_of(work, struct enic, reset); if (!netif_running(enic->netdev)) return; rtnl_lock(); enic_dev_hang_notify(enic); enic_stop(enic->netdev); enic_dev_hang_reset(enic); enic_reset_addr_lists(enic); enic_init_vnic_resources(enic); enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); enic_open(enic->netdev); rtnl_unlock(); } static int enic_set_intr_mode(struct enic *enic) { unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); unsigned int i; /* Set interrupt mode (INTx, MSI, MSI-X) depending * on system capabilities. 
* * Try MSI-X first * * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs * (the second to last INTR is used for WQ/RQ errors) * (the last INTR is used for notifications) */ BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); for (i = 0; i < n + m + 2; i++) enic->msix_entry[i].entry = i; /* Use multiple RQs if RSS is enabled */ if (ENIC_SETTING(enic, RSS) && enic->config.intr_mode < 1 && enic->rq_count >= n && enic->wq_count >= m && enic->cq_count >= n + m && enic->intr_count >= n + m + 2) { if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { enic->rq_count = n; enic->wq_count = m; enic->cq_count = n + m; enic->intr_count = n + m + 2; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); return 0; } } if (enic->config.intr_mode < 1 && enic->rq_count >= 1 && enic->wq_count >= m && enic->cq_count >= 1 + m && enic->intr_count >= 1 + m + 2) { if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) { enic->rq_count = 1; enic->wq_count = m; enic->cq_count = 1 + m; enic->intr_count = 1 + m + 2; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); return 0; } } /* Next try MSI * * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR */ if (enic->config.intr_mode < 2 && enic->rq_count >= 1 && enic->wq_count >= 1 && enic->cq_count >= 2 && enic->intr_count >= 1 && !pci_enable_msi(enic->pdev)) { enic->rq_count = 1; enic->wq_count = 1; enic->cq_count = 2; enic->intr_count = 1; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); return 0; } /* Next try INTx * * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs * (the first INTR is used for WQ/RQ) * (the second INTR is used for WQ/RQ errors) * (the last INTR is used for notifications) */ if (enic->config.intr_mode < 3 && enic->rq_count >= 1 && enic->wq_count >= 1 && enic->cq_count >= 2 && enic->intr_count >= 3) { enic->rq_count = 1; enic->wq_count = 1; enic->cq_count = 2; enic->intr_count = 3; vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); return 0; } vnic_dev_set_intr_mode(enic->vdev, 
VNIC_DEV_INTR_MODE_UNKNOWN); return -EINVAL; } static void enic_clear_intr_mode(struct enic *enic) { switch (vnic_dev_get_intr_mode(enic->vdev)) { case VNIC_DEV_INTR_MODE_MSIX: pci_disable_msix(enic->pdev); break; case VNIC_DEV_INTR_MODE_MSI: pci_disable_msi(enic->pdev); break; default: break; } vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); } static const struct net_device_ops enic_netdev_dynamic_ops = { .ndo_open = enic_open, .ndo_stop = enic_stop, .ndo_start_xmit = enic_hard_start_xmit, .ndo_get_stats64 = enic_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = enic_set_rx_mode, .ndo_set_mac_address = enic_set_mac_address_dynamic, .ndo_change_mtu = enic_change_mtu, .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, .ndo_tx_timeout = enic_tx_timeout, .ndo_set_vf_port = enic_set_vf_port, .ndo_get_vf_port = enic_get_vf_port, .ndo_set_vf_mac = enic_set_vf_mac, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = enic_poll_controller, #endif }; static const struct net_device_ops enic_netdev_ops = { .ndo_open = enic_open, .ndo_stop = enic_stop, .ndo_start_xmit = enic_hard_start_xmit, .ndo_get_stats64 = enic_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = enic_set_mac_address, .ndo_set_rx_mode = enic_set_rx_mode, .ndo_change_mtu = enic_change_mtu, .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, .ndo_tx_timeout = enic_tx_timeout, .ndo_set_vf_port = enic_set_vf_port, .ndo_get_vf_port = enic_get_vf_port, .ndo_set_vf_mac = enic_set_vf_mac, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = enic_poll_controller, #endif }; static void enic_dev_deinit(struct enic *enic) { unsigned int i; for (i = 0; i < enic->rq_count; i++) netif_napi_del(&enic->napi[i]); enic_free_vnic_resources(enic); enic_clear_intr_mode(enic); } static int enic_dev_init(struct enic *enic) { struct device *dev = enic_get_dev(enic); struct net_device 
*netdev = enic->netdev; unsigned int i; int err; /* Get interrupt coalesce timer info */ err = enic_dev_intr_coal_timer_info(enic); if (err) { dev_warn(dev, "Using default conversion factor for " "interrupt coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(enic->vdev); } /* Get vNIC configuration */ err = enic_get_vnic_config(enic); if (err) { dev_err(dev, "Get vNIC configuration failed, aborting\n"); return err; } /* Get available resource counts */ enic_get_res_counts(enic); /* Set interrupt mode based on resource counts and system * capabilities */ err = enic_set_intr_mode(enic); if (err) { dev_err(dev, "Failed to set intr mode based on resource " "counts and system capabilities, aborting\n"); return err; } /* Allocate and configure vNIC resources */ err = enic_alloc_vnic_resources(enic); if (err) { dev_err(dev, "Failed to alloc vNIC resources, aborting\n"); goto err_out_free_vnic_resources; } enic_init_vnic_resources(enic); err = enic_set_rss_nic_cfg(enic); if (err) { dev_err(dev, "Failed to config nic, aborting\n"); goto err_out_free_vnic_resources; } switch (vnic_dev_get_intr_mode(enic->vdev)) { default: netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < enic->rq_count; i++) netif_napi_add(netdev, &enic->napi[i], enic_poll_msix, 64); break; } return 0; err_out_free_vnic_resources: enic_clear_intr_mode(enic); enic_free_vnic_resources(enic); return err; } static void enic_iounmap(struct enic *enic) { unsigned int i; for (i = 0; i < ARRAY_SIZE(enic->bar); i++) if (enic->bar[i].vaddr) iounmap(enic->bar[i].vaddr); } static int __devinit enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct net_device *netdev; struct enic *enic; int using_dac = 0; unsigned int i; int err; #ifdef CONFIG_PCI_IOV int pos = 0; #endif int num_pps = 1; /* Allocate net device structure and initialize. Private * instance data is initialized to zero. 
*/ netdev = alloc_etherdev(sizeof(struct enic)); if (!netdev) return -ENOMEM; pci_set_drvdata(pdev, netdev); SET_NETDEV_DEV(netdev, &pdev->dev); enic = netdev_priv(netdev); enic->netdev = netdev; enic->pdev = pdev; /* Setup PCI resources */ err = pci_enable_device_mem(pdev); if (err) { dev_err(dev, "Cannot enable PCI device, aborting\n"); goto err_out_free_netdev; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "Cannot request PCI regions, aborting\n"); goto err_out_disable_device; } pci_set_master(pdev); /* Query PCI controller on system for DMA addressing * limitation for the device. Try 40-bit first, and * fail to 32-bit. */ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(dev, "No usable DMA configuration, aborting\n"); goto err_out_release_regions; } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(dev, "Unable to obtain %u-bit DMA " "for consistent allocations, aborting\n", 32); goto err_out_release_regions; } } else { err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { dev_err(dev, "Unable to obtain %u-bit DMA " "for consistent allocations, aborting\n", 40); goto err_out_release_regions; } using_dac = 1; } /* Map vNIC resources from BAR0-5 */ for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) continue; enic->bar[i].len = pci_resource_len(pdev, i); enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); if (!enic->bar[i].vaddr) { dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i); err = -ENODEV; goto err_out_iounmap; } enic->bar[i].bus_addr = pci_resource_start(pdev, i); } /* Register vNIC device */ enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, ARRAY_SIZE(enic->bar)); if (!enic->vdev) { dev_err(dev, "vNIC registration failed, aborting\n"); err = -ENODEV; goto err_out_iounmap; } #ifdef CONFIG_PCI_IOV /* Get number of subvnics */ pos = 
pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (pos) { pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &enic->num_vfs); if (enic->num_vfs) { err = pci_enable_sriov(pdev, enic->num_vfs); if (err) { dev_err(dev, "SRIOV enable failed, aborting." " pci_enable_sriov() returned %d\n", err); goto err_out_vnic_unregister; } enic->priv_flags |= ENIC_SRIOV_ENABLED; num_pps = enic->num_vfs; } } #endif /* Allocate structure for port profiles */ enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); if (!enic->pp) { err = -ENOMEM; goto err_out_disable_sriov_pp; } /* Issue device open to get device in known state */ err = enic_dev_open(enic); if (err) { dev_err(dev, "vNIC dev open failed, aborting\n"); goto err_out_disable_sriov; } /* Setup devcmd lock */ spin_lock_init(&enic->devcmd_lock); /* * Set ingress vlan rewrite mode before vnic initialization */ err = enic_dev_set_ig_vlan_rewrite_mode(enic); if (err) { dev_err(dev, "Failed to set ingress vlan rewrite mode, aborting.\n"); goto err_out_dev_close; } /* Issue device init to initialize the vnic-to-switch link. * We'll start with carrier off and wait for link UP * notification later to turn on carrier. We don't need * to wait here for the vnic-to-switch link initialization * to complete; link UP notification is the indication that * the process is complete. */ netif_carrier_off(netdev); /* Do not call dev_init for a dynamic vnic. * For a dynamic vnic, init_prov_info will be * called later by an upper layer. 
*/ if (!enic_is_dynamic(enic)) { err = vnic_dev_init(enic->vdev, 0); if (err) { dev_err(dev, "vNIC dev init failed, aborting\n"); goto err_out_dev_close; } } err = enic_dev_init(enic); if (err) { dev_err(dev, "Device initialization failed, aborting\n"); goto err_out_dev_close; } /* Setup notification timer, HW reset task, and wq locks */ init_timer(&enic->notify_timer); enic->notify_timer.function = enic_notify_timer; enic->notify_timer.data = (unsigned long)enic; INIT_WORK(&enic->reset, enic_reset); INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); for (i = 0; i < enic->wq_count; i++) spin_lock_init(&enic->wq_lock[i]); /* Register net device */ enic->port_mtu = enic->config.mtu; (void)enic_change_mtu(netdev, enic->port_mtu); err = enic_set_mac_addr(netdev, enic->mac_addr); if (err) { dev_err(dev, "Invalid MAC address, aborting\n"); goto err_out_dev_deinit; } enic->tx_coalesce_usecs = enic->config.intr_timer_usec; enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) netdev->netdev_ops = &enic_netdev_dynamic_ops; else netdev->netdev_ops = &enic_netdev_ops; netdev->watchdog_timeo = 2 * HZ; netdev->ethtool_ops = &enic_ethtool_ops; netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; if (ENIC_SETTING(enic, LOOP)) { netdev->features &= ~NETIF_F_HW_VLAN_TX; enic->loop_enable = 1; enic->loop_tag = enic->config.loop_tag; dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); } if (ENIC_SETTING(enic, TXCSUM)) netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; if (ENIC_SETTING(enic, TSO)) netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; if (ENIC_SETTING(enic, RXCSUM)) netdev->hw_features |= NETIF_F_RXCSUM; netdev->features |= netdev->hw_features; if (using_dac) netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; err = register_netdev(netdev); if (err) { dev_err(dev, "Cannot register net device, aborting\n"); goto err_out_dev_deinit; } return 0; err_out_dev_deinit: 
enic_dev_deinit(enic); err_out_dev_close: vnic_dev_close(enic->vdev); err_out_disable_sriov: kfree(enic->pp); err_out_disable_sriov_pp: #ifdef CONFIG_PCI_IOV if (enic_sriov_enabled(enic)) { pci_disable_sriov(pdev); enic->priv_flags &= ~ENIC_SRIOV_ENABLED; } err_out_vnic_unregister: #endif vnic_dev_unregister(enic->vdev); err_out_iounmap: enic_iounmap(enic); err_out_release_regions: pci_release_regions(pdev); err_out_disable_device: pci_disable_device(pdev); err_out_free_netdev: pci_set_drvdata(pdev, NULL); free_netdev(netdev); return err; } static void __devexit enic_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); if (netdev) { struct enic *enic = netdev_priv(netdev); cancel_work_sync(&enic->reset); cancel_work_sync(&enic->change_mtu_work); unregister_netdev(netdev); enic_dev_deinit(enic); vnic_dev_close(enic->vdev); #ifdef CONFIG_PCI_IOV if (enic_sriov_enabled(enic)) { pci_disable_sriov(pdev); enic->priv_flags &= ~ENIC_SRIOV_ENABLED; } #endif kfree(enic->pp); vnic_dev_unregister(enic->vdev); enic_iounmap(enic); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); free_netdev(netdev); } } static struct pci_driver enic_driver = { .name = DRV_NAME, .id_table = enic_id_table, .probe = enic_probe, .remove = __devexit_p(enic_remove), }; static int __init enic_init_module(void) { pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); return pci_register_driver(&enic_driver); } static void __exit enic_cleanup_module(void) { pci_unregister_driver(&enic_driver); } module_init(enic_init_module); module_exit(enic_cleanup_module);
gpl-2.0
mer-hybris/android_kernel_lge_hammerhead
drivers/video/backlight/tdo24m.c
4808
11119
/*
 * tdo24m - SPI-based drivers for Toppoly TDO24M series LCD panels
 *
 * Copyright (C) 2008 Marvell International Ltd.
 *	Eric Miao <eric.miao@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/spi/tdo24m.h>
#include <linux/fb.h>
#include <linux/lcd.h>
#include <linux/slab.h>

/* FB_BLANK_UNBLANK(0)/FB_BLANK_NORMAL(1) count as "powered on" */
#define POWER_IS_ON(pwr)	((pwr) <= FB_BLANK_NORMAL)

/* worst case: 1 command byte + 2 parameter bytes + pad (see tdo24m_writes) */
#define TDO24M_SPI_BUFF_SIZE	(4)
#define MODE_QVGA	0
#define MODE_VGA	1

/* Per-panel driver state; one SPI message/transfer pair is reused for
 * every command written in tdo24m_writes().
 */
struct tdo24m {
	struct spi_device	*spi_dev;
	struct lcd_device	*lcd_dev;

	struct spi_message	msg;
	struct spi_transfer	xfer;
	uint8_t			*buf;

	int (*adj_mode)(struct tdo24m *lcd, int mode); /* panel-specific */
	int color_invert;

	int			power;
	int			mode;
};

/* use bit 30, 31 as the indicator of command parameter number */
#define CMD0(x)		((0 << 30) | (x))
#define CMD1(x, x1)	((1 << 30) | ((x) << 9) | 0x100 | (x1))
#define CMD2(x, x1, x2)	((2 << 30) | ((x) << 18) | 0x20000 |\
			((x1) << 9) | 0x100 | (x2))
#define CMD_NULL	(-1)	/* terminates every command array below */

static uint32_t lcd_panel_reset[] = {
	CMD0(0x1), /* reset */
	CMD0(0x0), /* nop */
	CMD0(0x0), /* nop */
	CMD0(0x0), /* nop */
	CMD_NULL,
};

static uint32_t lcd_panel_on[] = {
	CMD0(0x29),		/* Display ON */
	CMD2(0xB8, 0xFF, 0xF9),	/* Output Control */
	CMD0(0x11),		/* Sleep out */
	CMD1(0xB0, 0x16),	/* Wake */
	CMD_NULL,
};

static uint32_t lcd_panel_off[] = {
	CMD0(0x28),		/* Display OFF */
	CMD2(0xB8, 0x80, 0x02),	/* Output Control */
	CMD0(0x10),		/* Sleep in */
	CMD1(0xB0, 0x00),	/* Deep stand by in */
	CMD_NULL,
};

static uint32_t lcd_vga_pass_through_tdo24m[] = {
	CMD1(0xB0, 0x16),
	CMD1(0xBC, 0x80),
	CMD1(0xE1, 0x00),
	CMD1(0x36, 0x50),
	CMD1(0x3B, 0x00),
	CMD_NULL,
};

static uint32_t lcd_qvga_pass_through_tdo24m[] = {
	CMD1(0xB0, 0x16),
	CMD1(0xBC, 0x81),
	CMD1(0xE1, 0x00),
	CMD1(0x36, 0x50),
	CMD1(0x3B, 0x22),
	CMD_NULL,
};

static uint32_t lcd_vga_transfer_tdo24m[] = {
	CMD1(0xcf, 0x02),	/* Blanking period control (1) */
	CMD2(0xd0, 0x08, 0x04),	/* Blanking period control (2) */
	CMD1(0xd1, 0x01),	/* CKV timing control on/off */
	CMD2(0xd2, 0x14, 0x00),	/* CKV 1,2 timing control */
	CMD2(0xd3, 0x1a, 0x0f),	/* OEV timing control */
	CMD2(0xd4, 0x1f, 0xaf),	/* ASW timing control (1) */
	CMD1(0xd5, 0x14),	/* ASW timing control (2) */
	CMD0(0x21),		/* Invert for normally black display */
	CMD0(0x29),		/* Display on */
	CMD_NULL,
};

static uint32_t lcd_qvga_transfer[] = {
	CMD1(0xd6, 0x02),	/* Blanking period control (1) */
	CMD2(0xd7, 0x08, 0x04),	/* Blanking period control (2) */
	CMD1(0xd8, 0x01),	/* CKV timing control on/off */
	CMD2(0xd9, 0x00, 0x08),	/* CKV 1,2 timing control */
	CMD2(0xde, 0x05, 0x0a),	/* OEV timing control */
	CMD2(0xdf, 0x0a, 0x19),	/* ASW timing control (1) */
	CMD1(0xe0, 0x0a),	/* ASW timing control (2) */
	CMD0(0x21),		/* Invert for normally black display */
	CMD0(0x29),		/* Display on */
	CMD_NULL,
};

static uint32_t lcd_vga_pass_through_tdo35s[] = {
	CMD1(0xB0, 0x16),
	CMD1(0xBC, 0x80),
	CMD1(0xE1, 0x00),
	CMD1(0x3B, 0x00),
	CMD_NULL,
};

static uint32_t lcd_qvga_pass_through_tdo35s[] = {
	CMD1(0xB0, 0x16),
	CMD1(0xBC, 0x81),
	CMD1(0xE1, 0x00),
	CMD1(0x3B, 0x22),
	CMD_NULL,
};

static uint32_t lcd_vga_transfer_tdo35s[] = {
	CMD1(0xcf, 0x02),	/* Blanking period control (1) */
	CMD2(0xd0, 0x08, 0x04),	/* Blanking period control (2) */
	CMD1(0xd1, 0x01),	/* CKV timing control on/off */
	CMD2(0xd2, 0x00, 0x1e),	/* CKV 1,2 timing control */
	CMD2(0xd3, 0x14, 0x28),	/* OEV timing control */
	CMD2(0xd4, 0x28, 0x64),	/* ASW timing control (1) */
	CMD1(0xd5, 0x28),	/* ASW timing control (2) */
	CMD0(0x21),		/* Invert for normally black display */
	CMD0(0x29),		/* Display on */
	CMD_NULL,
};

static uint32_t lcd_panel_config[] = {
	CMD2(0xb8, 0xff, 0xf9),	/* Output control */
	CMD0(0x11),		/* sleep out */
	CMD1(0xba, 0x01),	/* Display mode (1) */
	CMD1(0xbb, 0x00),	/* Display mode (2) */
	CMD1(0x3a, 0x60),	/* Display mode 18-bit RGB */
	CMD1(0xbf, 0x10),	/* Drive system change control */
	CMD1(0xb1, 0x56),	/* Booster operation setup */
	CMD1(0xb2, 0x33),	/* Booster mode setup */
	CMD1(0xb3, 0x11),	/* Booster frequency setup */
	CMD1(0xb4, 0x02),	/* Op amp/system clock */
	CMD1(0xb5, 0x35),	/* VCS voltage */
	CMD1(0xb6, 0x40),	/* VCOM voltage */
	CMD1(0xb7, 0x03),	/* External display signal */
	CMD1(0xbd, 0x00),	/* ASW slew rate */
	CMD1(0xbe, 0x00),	/* Dummy data for QuadData operation */
	CMD1(0xc0, 0x11),	/* Sleep out FR count (A) */
	CMD1(0xc1, 0x11),	/* Sleep out FR count (B) */
	CMD1(0xc2, 0x11),	/* Sleep out FR count (C) */
	CMD2(0xc3, 0x20, 0x40),	/* Sleep out FR count (D) */
	CMD2(0xc4, 0x60, 0xc0),	/* Sleep out FR count (E) */
	CMD2(0xc5, 0x10, 0x20),	/* Sleep out FR count (F) */
	CMD1(0xc6, 0xc0),	/* Sleep out FR count (G) */
	CMD2(0xc7, 0x33, 0x43),	/* Gamma 1 fine tuning (1) */
	CMD1(0xc8, 0x44),	/* Gamma 1 fine tuning (2) */
	CMD1(0xc9, 0x33),	/* Gamma 1 inclination adjustment */
	CMD1(0xca, 0x00),	/* Gamma 1 blue offset adjustment */
	CMD2(0xec, 0x01, 0xf0),	/* Horizontal clock cycles */
	CMD_NULL,
};

/* Stream a CMD_NULL-terminated array of encoded commands to the panel,
 * one spi_sync() per command.  The parameter count lives in bits 30-31
 * of each entry (see CMD0/1/2); the command/parameter bytes are unpacked
 * into lcd->buf.  The 0x21 "invert" command is skipped unless the
 * platform asked for color inversion.  Returns the first spi_sync()
 * error, or 0.
 */
static int tdo24m_writes(struct tdo24m *lcd, uint32_t *array)
{
	struct spi_transfer *x = &lcd->xfer;
	uint32_t data, *p = array;
	int nparams, err = 0;

	for (; *p != CMD_NULL; p++) {
		if (!lcd->color_invert && *p == CMD0(0x21))
			continue;

		nparams = (*p >> 30) & 0x3;

		data = *p << (7 - nparams);
		switch (nparams) {
		case 0:
			lcd->buf[0] = (data >> 8) & 0xff;
			lcd->buf[1] = data & 0xff;
			break;
		case 1:
			lcd->buf[0] = (data >> 16) & 0xff;
			lcd->buf[1] = (data >> 8) & 0xff;
			lcd->buf[2] = data & 0xff;
			break;
		case 2:
			lcd->buf[0] = (data >> 24) & 0xff;
			lcd->buf[1] = (data >> 16) & 0xff;
			lcd->buf[2] = (data >> 8) & 0xff;
			lcd->buf[3] = data & 0xff;
			break;
		default:
			continue;
		}
		x->len = nparams + 2;
		err = spi_sync(lcd->spi_dev, &lcd->msg);
		if (err)
			break;
	}

	return err;
}

/* Switch a TDO24M panel between VGA and QVGA register programs. */
static int tdo24m_adj_mode(struct tdo24m *lcd, int mode)
{
	switch (mode) {
	case MODE_VGA:
		tdo24m_writes(lcd, lcd_vga_pass_through_tdo24m);
		tdo24m_writes(lcd, lcd_panel_config);
		tdo24m_writes(lcd, lcd_vga_transfer_tdo24m);
		break;
	case MODE_QVGA:
		tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m);
		tdo24m_writes(lcd, lcd_panel_config);
		tdo24m_writes(lcd, lcd_qvga_transfer);
		break;
	default:
		return -EINVAL;
	}

	lcd->mode = mode;
	return 0;
}

/* Switch a TDO35S panel between VGA and QVGA register programs. */
static int tdo35s_adj_mode(struct tdo24m *lcd, int mode)
{
	switch (mode) {
	case MODE_VGA:
		tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s);
		tdo24m_writes(lcd, lcd_panel_config);
		tdo24m_writes(lcd, lcd_vga_transfer_tdo35s);
		break;
	case MODE_QVGA:
		tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s);
		tdo24m_writes(lcd, lcd_panel_config);
		tdo24m_writes(lcd, lcd_qvga_transfer);
		break;
	default:
		return -EINVAL;
	}

	lcd->mode = mode;
	return 0;
}

/* Power-on sequence: display on, reset, then re-program current mode. */
static int tdo24m_power_on(struct tdo24m *lcd)
{
	int err;

	err = tdo24m_writes(lcd, lcd_panel_on);
	if (err)
		goto out;

	err = tdo24m_writes(lcd, lcd_panel_reset);
	if (err)
		goto out;

	err = lcd->adj_mode(lcd, lcd->mode);
out:
	return err;
}

static int tdo24m_power_off(struct tdo24m *lcd)
{
	return tdo24m_writes(lcd, lcd_panel_off);
}

/* Transition the panel only on an actual on<->off edge; caches @power. */
static int tdo24m_power(struct tdo24m *lcd, int power)
{
	int ret = 0;

	if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
		ret = tdo24m_power_on(lcd);
	else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
		ret = tdo24m_power_off(lcd);

	if (!ret)
		lcd->power = power;

	return ret;
}

/* lcd_ops.set_power */
static int tdo24m_set_power(struct lcd_device *ld, int power)
{
	struct tdo24m *lcd = lcd_get_data(ld);
	return tdo24m_power(lcd, power);
}

/* lcd_ops.get_power */
static int tdo24m_get_power(struct lcd_device *ld)
{
	struct tdo24m *lcd = lcd_get_data(ld);

	return lcd->power;
}

/* lcd_ops.set_mode: map the fb mode's xres to VGA/QVGA (480 matches a
 * rotated 480x640 VGA layout) and reprogram the panel only on change.
 */
static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m)
{
	struct tdo24m *lcd = lcd_get_data(ld);
	int mode = MODE_QVGA;

	if (m->xres == 640 || m->xres == 480)
		mode = MODE_VGA;

	if (lcd->mode == mode)
		return 0;

	return lcd->adj_mode(lcd, mode);
}

/* NOTE(review): this initializer continues beyond the visible chunk;
 * the fragment below is reproduced unchanged.
 */
static struct lcd_ops tdo24m_ops = {
	.get_power	= tdo24m_get_power,
	.set_power	= tdo24m_set_power,
	.set_mode	=
tdo24m_set_mode, }; static int __devinit tdo24m_probe(struct spi_device *spi) { struct tdo24m *lcd; struct spi_message *m; struct spi_transfer *x; struct tdo24m_platform_data *pdata; enum tdo24m_model model; int err; pdata = spi->dev.platform_data; if (pdata) model = pdata->model; else model = TDO24M; spi->bits_per_word = 8; spi->mode = SPI_MODE_3; err = spi_setup(spi); if (err) return err; lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL); if (!lcd) return -ENOMEM; lcd->spi_dev = spi; lcd->power = FB_BLANK_POWERDOWN; lcd->mode = MODE_VGA; /* default to VGA */ lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL); if (lcd->buf == NULL) { kfree(lcd); return -ENOMEM; } m = &lcd->msg; x = &lcd->xfer; spi_message_init(m); x->cs_change = 1; x->tx_buf = &lcd->buf[0]; spi_message_add_tail(x, m); switch (model) { case TDO24M: lcd->color_invert = 1; lcd->adj_mode = tdo24m_adj_mode; break; case TDO35S: lcd->adj_mode = tdo35s_adj_mode; lcd->color_invert = 0; break; default: dev_err(&spi->dev, "Unsupported model"); goto out_free; } lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev, lcd, &tdo24m_ops); if (IS_ERR(lcd->lcd_dev)) { err = PTR_ERR(lcd->lcd_dev); goto out_free; } dev_set_drvdata(&spi->dev, lcd); err = tdo24m_power(lcd, FB_BLANK_UNBLANK); if (err) goto out_unregister; return 0; out_unregister: lcd_device_unregister(lcd->lcd_dev); out_free: kfree(lcd->buf); kfree(lcd); return err; } static int __devexit tdo24m_remove(struct spi_device *spi) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); tdo24m_power(lcd, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->lcd_dev); kfree(lcd->buf); kfree(lcd); return 0; } #ifdef CONFIG_PM static int tdo24m_suspend(struct spi_device *spi, pm_message_t state) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); return tdo24m_power(lcd, FB_BLANK_POWERDOWN); } static int tdo24m_resume(struct spi_device *spi) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); return tdo24m_power(lcd, FB_BLANK_UNBLANK); } #else #define tdo24m_suspend NULL 
#define tdo24m_resume NULL #endif /* Power down all displays on reboot, poweroff or halt */ static void tdo24m_shutdown(struct spi_device *spi) { struct tdo24m *lcd = dev_get_drvdata(&spi->dev); tdo24m_power(lcd, FB_BLANK_POWERDOWN); } static struct spi_driver tdo24m_driver = { .driver = { .name = "tdo24m", .owner = THIS_MODULE, }, .probe = tdo24m_probe, .remove = __devexit_p(tdo24m_remove), .shutdown = tdo24m_shutdown, .suspend = tdo24m_suspend, .resume = tdo24m_resume, }; module_spi_driver(tdo24m_driver); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"); MODULE_DESCRIPTION("Driver for Toppoly TDO24M LCD Panel"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:tdo24m");
gpl-2.0
schqiushui/kernel_kk442_sense_dlx
drivers/staging/xgifb/XGI_main_26.c
4808
60736
/* * XG20, XG21, XG40, XG42 frame buffer device * for Linux kernels 2.5.x, 2.6.x * Base on TW's sis fbdev code. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* #include <linux/config.h> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/console.h> #include <linux/selection.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/vt_kern.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/io.h> #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include "XGIfb.h" #include "vgatypes.h" #include "XGI_main.h" #include "vb_init.h" #include "vb_util.h" #include "vb_setmode.h" #define Index_CR_GPIO_Reg1 0x48 #define Index_CR_GPIO_Reg3 0x4a #define GPIOG_EN (1<<6) #define GPIOG_READ (1<<1) static char *forcecrt2type; static char *mode; static int vesa = -1; static unsigned int refresh_rate; /* -------------------- Macro definitions ---------------------------- */ #undef XGIFBDEBUG #ifdef XGIFBDEBUG #define DPRINTK(fmt, args...) pr_debug("%s: " fmt, __func__ , ## args) #else #define DPRINTK(fmt, args...) 
#endif #ifdef XGIFBDEBUG static void dumpVGAReg(void) { u8 i, reg; xgifb_reg_set(XGISR, 0x05, 0x86); /* xgifb_reg_set(XGISR, 0x08, 0x4f); xgifb_reg_set(XGISR, 0x0f, 0x20); xgifb_reg_set(XGISR, 0x11, 0x4f); xgifb_reg_set(XGISR, 0x13, 0x45); xgifb_reg_set(XGISR, 0x14, 0x51); xgifb_reg_set(XGISR, 0x1e, 0x41); xgifb_reg_set(XGISR, 0x1f, 0x0); xgifb_reg_set(XGISR, 0x20, 0xa1); xgifb_reg_set(XGISR, 0x22, 0xfb); xgifb_reg_set(XGISR, 0x26, 0x22); xgifb_reg_set(XGISR, 0x3e, 0x07); */ /* xgifb_reg_set(XGICR, 0x19, 0x00); */ /* xgifb_reg_set(XGICR, 0x1a, 0x3C); */ /* xgifb_reg_set(XGICR, 0x22, 0xff); */ /* xgifb_reg_set(XGICR, 0x3D, 0x10); */ /* xgifb_reg_set(XGICR, 0x4a, 0xf3); */ /* xgifb_reg_set(XGICR, 0x57, 0x0); */ /* xgifb_reg_set(XGICR, 0x7a, 0x2c); */ /* xgifb_reg_set(XGICR, 0x82, 0xcc); */ /* xgifb_reg_set(XGICR, 0x8c, 0x0); */ /* xgifb_reg_set(XGICR, 0x99, 0x1); xgifb_reg_set(XGICR, 0x41, 0x40); */ for (i = 0; i < 0x4f; i++) { reg = xgifb_reg_get(XGISR, i); printk("\no 3c4 %x", i); printk("\ni 3c5 => %x", reg); } for (i = 0; i < 0xF0; i++) { reg = xgifb_reg_get(XGICR, i); printk("\no 3d4 %x", i); printk("\ni 3d5 => %x", reg); } /* xgifb_reg_set(XGIPART1,0x2F,1); for (i=1; i < 0x50; i++) { reg = xgifb_reg_get(XGIPART1, i); printk("\no d004 %x", i); printk("\ni d005 => %x", reg); } for (i=0; i < 0x50; i++) { reg = xgifb_reg_get(XGIPART2, i); printk("\no d010 %x", i); printk("\ni d011 => %x", reg); } for (i=0; i < 0x50; i++) { reg = xgifb_reg_get(XGIPART3, i); printk("\no d012 %x",i); printk("\ni d013 => %x",reg); } for (i=0; i < 0x50; i++) { reg = xgifb_reg_get(XGIPART4, i); printk("\no d014 %x",i); printk("\ni d015 => %x",reg); } */ } #else static inline void dumpVGAReg(void) { } #endif #if 1 #define DEBUGPRN(x) #else #define DEBUGPRN(x) pr_info(x "\n"); #endif /* --------------- Hardware Access Routines -------------------------- */ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr, struct xgi_hw_device_info *HwDeviceExtension, unsigned char modeno, 
unsigned char rateindex) { unsigned short ModeNo = modeno; unsigned short ModeIdIndex = 0, ClockIndex = 0; unsigned short RefreshRateTableIndex = 0; /* unsigned long temp = 0; */ int Clock; InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr); RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, ModeIdIndex, XGI_Pr); /* temp = XGI_SearchModeID(ModeNo , &ModeIdIndex, XGI_Pr) ; if (!temp) { printk(KERN_ERR "Could not find mode %x\n", ModeNo); return 65000; } RefreshRateTableIndex = XGI_Pr->EModeIDTable[ModeIdIndex].REFindex; RefreshRateTableIndex += (rateindex - 1); */ ClockIndex = XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_CRTVCLK; Clock = XGI_Pr->VCLKData[ClockIndex].CLOCK * 1000; return Clock; } static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr, struct xgi_hw_device_info *HwDeviceExtension, unsigned char modeno, unsigned char rateindex, u32 *left_margin, u32 *right_margin, u32 *upper_margin, u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync, u32 *vmode) { unsigned short ModeNo = modeno; unsigned short ModeIdIndex = 0, index = 0; unsigned short RefreshRateTableIndex = 0; unsigned short VRE, VBE, VRS, VBS, VDE, VT; unsigned short HRE, HBE, HRS, HBS, HDE, HT; unsigned char sr_data, cr_data, cr_data2; unsigned long cr_data3; int A, B, C, D, E, F, temp, j; InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr); RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo, ModeIdIndex, XGI_Pr); /* temp = XGI_SearchModeID(ModeNo, &ModeIdIndex, XGI_Pr); if (!temp) return 0; RefreshRateTableIndex = XGI_Pr->EModeIDTable[ModeIdIndex].REFindex; RefreshRateTableIndex += (rateindex - 1); */ index = XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC; sr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[5]; cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[0]; /* Horizontal total */ HT = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x03) << 8); A = HT + 5; /* cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[1]; Horizontal display 
enable end HDE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x0C) << 6); */ HDE = (XGI_Pr->RefIndex[RefreshRateTableIndex].XRes >> 3) - 1; E = HDE + 1; cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[3]; /* Horizontal retrace (=sync) start */ HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2); F = HRS - E - 3; cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[1]; /* Horizontal blank start */ HBS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x30) << 4); sr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[6]; cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[2]; cr_data2 = XGI_Pr->XGINEWUB_CRT1Table[index].CR[4]; /* Horizontal blank end */ HBE = (cr_data & 0x1f) | ((unsigned short) (cr_data2 & 0x80) >> 2) | ((unsigned short) (sr_data & 0x03) << 6); /* Horizontal retrace (=sync) end */ HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3); temp = HBE - ((E - 1) & 255); B = (temp > 0) ? temp : (temp + 256); temp = HRE - ((E + F + 3) & 63); C = (temp > 0) ? temp : (temp + 64); D = B - F - C; *left_margin = D * 8; *right_margin = F * 8; *hsync_len = C * 8; sr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[14]; cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[8]; cr_data2 = XGI_Pr->XGINEWUB_CRT1Table[index].CR[9]; /* Vertical total */ VT = (cr_data & 0xFF) | ((unsigned short) (cr_data2 & 0x01) << 8) | ((unsigned short) (cr_data2 & 0x20) << 4) | ((unsigned short) (sr_data & 0x01) << 10); A = VT + 2; /* cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[10]; */ /* Vertical display enable end */ /* VDE = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x02) << 7) | ((unsigned short) (cr_data2 & 0x40) << 3) | ((unsigned short) (sr_data & 0x02) << 9); */ VDE = XGI_Pr->RefIndex[RefreshRateTableIndex].YRes - 1; E = VDE + 1; cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[10]; /* Vertical retrace (=sync) start */ VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6) | ((unsigned short) (cr_data2 & 0x80) << 2) | ((unsigned short) (sr_data & 0x08) << 7); F = VRS + 1 - 
E; cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[12]; cr_data3 = (XGI_Pr->XGINEWUB_CRT1Table[index].CR[14] & 0x80) << 5; /* Vertical blank start */ VBS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x08) << 5) | ((unsigned short) (cr_data3 & 0x20) << 4) | ((unsigned short) (sr_data & 0x04) << 8); cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[13]; /* Vertical blank end */ VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4); temp = VBE - ((E - 1) & 511); B = (temp > 0) ? temp : (temp + 512); cr_data = XGI_Pr->XGINEWUB_CRT1Table[index].CR[11]; /* Vertical retrace (=sync) end */ VRE = (cr_data & 0x0f) | ((sr_data & 0x20) >> 1); temp = VRE - ((E + F - 1) & 31); C = (temp > 0) ? temp : (temp + 32); D = B - F - C; *upper_margin = D; *lower_margin = F; *vsync_len = C; if (XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x8000) *sync &= ~FB_SYNC_VERT_HIGH_ACT; else *sync |= FB_SYNC_VERT_HIGH_ACT; if (XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x4000) *sync &= ~FB_SYNC_HOR_HIGH_ACT; else *sync |= FB_SYNC_HOR_HIGH_ACT; *vmode = FB_VMODE_NONINTERLACED; if (XGI_Pr->RefIndex[RefreshRateTableIndex].Ext_InfoFlag & 0x0080) *vmode = FB_VMODE_INTERLACED; else { j = 0; while (XGI_Pr->EModeIDTable[j].Ext_ModeID != 0xff) { if (XGI_Pr->EModeIDTable[j].Ext_ModeID == XGI_Pr->RefIndex[RefreshRateTableIndex].ModeID) { if (XGI_Pr->EModeIDTable[j].Ext_ModeFlag & DoubleScanMode) { *vmode = FB_VMODE_DOUBLE; } break; } j++; } } return 1; } static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr) { XGI_Pr->RelIO = BaseAddr; XGI_Pr->P3c4 = BaseAddr + 0x14; XGI_Pr->P3d4 = BaseAddr + 0x24; XGI_Pr->P3c0 = BaseAddr + 0x10; XGI_Pr->P3ce = BaseAddr + 0x1e; XGI_Pr->P3c2 = BaseAddr + 0x12; XGI_Pr->P3ca = BaseAddr + 0x1a; XGI_Pr->P3c6 = BaseAddr + 0x16; XGI_Pr->P3c7 = BaseAddr + 0x17; XGI_Pr->P3c8 = BaseAddr + 0x18; XGI_Pr->P3c9 = BaseAddr + 0x19; XGI_Pr->P3da = BaseAddr + 0x2A; /* Digital video interface registers (LCD) */ XGI_Pr->Part1Port = 
BaseAddr + SIS_CRT2_PORT_04; /* 301 TV Encoder registers */ XGI_Pr->Part2Port = BaseAddr + SIS_CRT2_PORT_10; /* 301 Macrovision registers */ XGI_Pr->Part3Port = BaseAddr + SIS_CRT2_PORT_12; /* 301 VGA2 (and LCD) registers */ XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14; /* 301 palette address port registers */ XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2; } /* ------------------ Internal helper routines ----------------- */ static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info) { int found_mode = 0; int XGIfb_mode_idx = 0; found_mode = 0; while ((XGIbios_mode[XGIfb_mode_idx].mode_no != 0) && (XGIbios_mode[XGIfb_mode_idx].xres <= xgifb_info->lvds_data.LVDSHDE)) { if ((XGIbios_mode[XGIfb_mode_idx].xres == xgifb_info->lvds_data.LVDSHDE) && (XGIbios_mode[XGIfb_mode_idx].yres == xgifb_info->lvds_data.LVDSVDE) && (XGIbios_mode[XGIfb_mode_idx].bpp == 8)) { found_mode = 1; break; } XGIfb_mode_idx++; } if (!found_mode) XGIfb_mode_idx = -1; return XGIfb_mode_idx; } static void XGIfb_search_mode(struct xgifb_video_info *xgifb_info, const char *name) { int i = 0, j = 0, l; while (XGIbios_mode[i].mode_no != 0) { l = min(strlen(name), strlen(XGIbios_mode[i].name)); if (!strncmp(name, XGIbios_mode[i].name, l)) { xgifb_info->mode_idx = i; j = 1; break; } i++; } if (!j) pr_info("Invalid mode '%s'\n", name); } static void XGIfb_search_vesamode(struct xgifb_video_info *xgifb_info, unsigned int vesamode) { int i = 0, j = 0; if (vesamode == 0) goto invalid; vesamode &= 0x1dff; /* Clean VESA mode number from other flags */ while (XGIbios_mode[i].mode_no != 0) { if ((XGIbios_mode[i].vesa_mode_no_1 == vesamode) || (XGIbios_mode[i].vesa_mode_no_2 == vesamode)) { xgifb_info->mode_idx = i; j = 1; break; } i++; } invalid: if (!j) pr_info("Invalid VESA mode 0x%x'\n", vesamode); } static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex) { u16 xres, yres; struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info; if (xgifb_info->chip == 
XG21) { if (xgifb_info->display2 == XGIFB_DISP_LCD) { xres = xgifb_info->lvds_data.LVDSHDE; yres = xgifb_info->lvds_data.LVDSVDE; if (XGIbios_mode[myindex].xres > xres) return -1; if (XGIbios_mode[myindex].yres > yres) return -1; if ((XGIbios_mode[myindex].xres < xres) && (XGIbios_mode[myindex].yres < yres)) { if (XGIbios_mode[myindex].bpp > 8) return -1; } } return myindex; } /* FIXME: for now, all is valid on XG27 */ if (xgifb_info->chip == XG27) return myindex; if (!(XGIbios_mode[myindex].chipset & MD_XGI315)) return -1; switch (xgifb_info->display2) { case XGIFB_DISP_LCD: switch (hw_info->ulCRT2LCDType) { case LCD_640x480: xres = 640; yres = 480; break; case LCD_800x600: xres = 800; yres = 600; break; case LCD_1024x600: xres = 1024; yres = 600; break; case LCD_1024x768: xres = 1024; yres = 768; break; case LCD_1152x768: xres = 1152; yres = 768; break; case LCD_1280x960: xres = 1280; yres = 960; break; case LCD_1280x768: xres = 1280; yres = 768; break; case LCD_1280x1024: xres = 1280; yres = 1024; break; case LCD_1400x1050: xres = 1400; yres = 1050; break; case LCD_1600x1200: xres = 1600; yres = 1200; break; default: xres = 0; yres = 0; break; } if (XGIbios_mode[myindex].xres > xres) return -1; if (XGIbios_mode[myindex].yres > yres) return -1; if ((hw_info->ulExternalChip == 0x01) || /* LVDS */ (hw_info->ulExternalChip == 0x05)) { /* LVDS+Chrontel */ switch (XGIbios_mode[myindex].xres) { case 512: if (XGIbios_mode[myindex].yres != 512) return -1; if (hw_info->ulCRT2LCDType == LCD_1024x600) return -1; break; case 640: if ((XGIbios_mode[myindex].yres != 400) && (XGIbios_mode[myindex].yres != 480)) return -1; break; case 800: if (XGIbios_mode[myindex].yres != 600) return -1; break; case 1024: if ((XGIbios_mode[myindex].yres != 600) && (XGIbios_mode[myindex].yres != 768)) return -1; if ((XGIbios_mode[myindex].yres == 600) && (hw_info->ulCRT2LCDType != LCD_1024x600)) return -1; break; case 1152: if ((XGIbios_mode[myindex].yres) != 768) return -1; if 
(hw_info->ulCRT2LCDType != LCD_1152x768) return -1; break; case 1280: if ((XGIbios_mode[myindex].yres != 768) && (XGIbios_mode[myindex].yres != 1024)) return -1; if ((XGIbios_mode[myindex].yres == 768) && (hw_info->ulCRT2LCDType != LCD_1280x768)) return -1; break; case 1400: if (XGIbios_mode[myindex].yres != 1050) return -1; break; case 1600: if (XGIbios_mode[myindex].yres != 1200) return -1; break; default: return -1; } } else { switch (XGIbios_mode[myindex].xres) { case 512: if (XGIbios_mode[myindex].yres != 512) return -1; break; case 640: if ((XGIbios_mode[myindex].yres != 400) && (XGIbios_mode[myindex].yres != 480)) return -1; break; case 800: if (XGIbios_mode[myindex].yres != 600) return -1; break; case 1024: if (XGIbios_mode[myindex].yres != 768) return -1; break; case 1280: if ((XGIbios_mode[myindex].yres != 960) && (XGIbios_mode[myindex].yres != 1024)) return -1; if (XGIbios_mode[myindex].yres == 960) { if (hw_info->ulCRT2LCDType == LCD_1400x1050) return -1; } break; case 1400: if (XGIbios_mode[myindex].yres != 1050) return -1; break; case 1600: if (XGIbios_mode[myindex].yres != 1200) return -1; break; default: return -1; } } break; case XGIFB_DISP_TV: switch (XGIbios_mode[myindex].xres) { case 512: case 640: case 800: break; case 720: if (xgifb_info->TV_type == TVMODE_NTSC) { if (XGIbios_mode[myindex].yres != 480) return -1; } else if (xgifb_info->TV_type == TVMODE_PAL) { if (XGIbios_mode[myindex].yres != 576) return -1; } /* TW: LVDS/CHRONTEL does not support 720 */ if (xgifb_info->hasVB == HASVB_LVDS_CHRONTEL || xgifb_info->hasVB == HASVB_CHRONTEL) { return -1; } break; case 1024: if (xgifb_info->TV_type == TVMODE_NTSC) { if (XGIbios_mode[myindex].bpp == 32) return -1; } break; default: return -1; } break; case XGIFB_DISP_CRT: if (XGIbios_mode[myindex].xres > 1280) return -1; break; case XGIFB_DISP_NONE: break; } return myindex; } static void XGIfb_search_crt2type(const char *name) { int i = 0; if (name == NULL) return; while (XGI_crt2type[i].type_no != 
-1) { if (!strcmp(name, XGI_crt2type[i].name)) { XGIfb_crt2type = XGI_crt2type[i].type_no; XGIfb_tvplug = XGI_crt2type[i].tvplug_no; break; } i++; } if (XGIfb_crt2type < 0) pr_info("Invalid CRT2 type: %s\n", name); } static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info, unsigned int rate) { u16 xres, yres; int i = 0; xres = XGIbios_mode[xgifb_info->mode_idx].xres; yres = XGIbios_mode[xgifb_info->mode_idx].yres; xgifb_info->rate_idx = 0; while ((XGIfb_vrate[i].idx != 0) && (XGIfb_vrate[i].xres <= xres)) { if ((XGIfb_vrate[i].xres == xres) && (XGIfb_vrate[i].yres == yres)) { if (XGIfb_vrate[i].refresh == rate) { xgifb_info->rate_idx = XGIfb_vrate[i].idx; break; } else if (XGIfb_vrate[i].refresh > rate) { if ((XGIfb_vrate[i].refresh - rate) <= 3) { DPRINTK("XGIfb: Adjusting rate from %d up to %d\n", rate, XGIfb_vrate[i].refresh); xgifb_info->rate_idx = XGIfb_vrate[i].idx; xgifb_info->refresh_rate = XGIfb_vrate[i].refresh; } else if (((rate - XGIfb_vrate[i - 1].refresh) <= 2) && (XGIfb_vrate[i].idx != 1)) { DPRINTK("XGIfb: Adjusting rate from %d down to %d\n", rate, XGIfb_vrate[i-1].refresh); xgifb_info->rate_idx = XGIfb_vrate[i - 1].idx; xgifb_info->refresh_rate = XGIfb_vrate[i - 1].refresh; } break; } else if ((rate - XGIfb_vrate[i].refresh) <= 2) { DPRINTK("XGIfb: Adjusting rate from %d down to %d\n", rate, XGIfb_vrate[i].refresh); xgifb_info->rate_idx = XGIfb_vrate[i].idx; break; } } i++; } if (xgifb_info->rate_idx > 0) { return xgifb_info->rate_idx; } else { pr_info("Unsupported rate %d for %dx%d\n", rate, xres, yres); return 0; } } static void XGIfb_search_tvstd(const char *name) { int i = 0; if (name == NULL) return; while (XGI_tvtype[i].type_no != -1) { if (!strcmp(name, XGI_tvtype[i].name)) { XGIfb_tvmode = XGI_tvtype[i].type_no; break; } i++; } } /* ----------- FBDev related routines for all series ----------- */ static void XGIfb_bpp_to_var(struct xgifb_video_info *xgifb_info, struct fb_var_screeninfo *var) { switch (var->bits_per_pixel) { 
case 8: var->red.offset = var->green.offset = var->blue.offset = 0; var->red.length = var->green.length = var->blue.length = 6; xgifb_info->video_cmap_len = 256; break; case 16: var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; xgifb_info->video_cmap_len = 16; break; case 32: var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; xgifb_info->video_cmap_len = 16; break; } } /* --------------------- SetMode routines ------------------------- */ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info) { u8 cr30 = 0, cr31 = 0; cr31 = xgifb_reg_get(XGICR, 0x31); cr31 &= ~0x60; switch (xgifb_info->display2) { case XGIFB_DISP_CRT: cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE); cr31 |= SIS_DRIVER_MODE; break; case XGIFB_DISP_LCD: cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE); cr31 |= SIS_DRIVER_MODE; break; case XGIFB_DISP_TV: if (xgifb_info->TV_type == TVMODE_HIVISION) cr30 = (SIS_VB_OUTPUT_HIVISION | SIS_SIMULTANEOUS_VIEW_ENABLE); else if (xgifb_info->TV_plug == TVPLUG_SVIDEO) cr30 = (SIS_VB_OUTPUT_SVIDEO | SIS_SIMULTANEOUS_VIEW_ENABLE); else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE) cr30 = (SIS_VB_OUTPUT_COMPOSITE | SIS_SIMULTANEOUS_VIEW_ENABLE); else if (xgifb_info->TV_plug == TVPLUG_SCART) cr30 = (SIS_VB_OUTPUT_SCART | SIS_SIMULTANEOUS_VIEW_ENABLE); cr31 |= SIS_DRIVER_MODE; if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL) cr31 |= 0x01; else cr31 &= ~0x01; break; default: /* disable CRT2 */ cr30 = 0x00; cr31 |= (SIS_DRIVER_MODE | SIS_VB_OUTPUT_DISABLE); } xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30); xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31); xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33, (xgifb_info->rate_idx & 0x0F)); } static void 
XGIfb_post_setmode(struct xgifb_video_info *xgifb_info) { u8 reg; unsigned char doit = 1; /* xgifb_reg_set(XGISR,IND_SIS_PASSWORD,SIS_PASSWORD); xgifb_reg_set(XGICR, 0x13, 0x00); xgifb_reg_and_or(XGISR,0x0E, 0xF0, 0x01); *test* */ if (xgifb_info->video_bpp == 8) { /* TW: We can't switch off CRT1 on LVDS/Chrontel * in 8bpp Modes */ if ((xgifb_info->hasVB == HASVB_LVDS) || (xgifb_info->hasVB == HASVB_LVDS_CHRONTEL)) { doit = 0; } /* TW: We can't switch off CRT1 on 301B-DH * in 8bpp Modes if using LCD */ if (xgifb_info->display2 == XGIFB_DISP_LCD) doit = 0; } /* TW: We can't switch off CRT1 if bridge is in slave mode */ if (xgifb_info->hasVB != HASVB_NONE) { reg = xgifb_reg_get(XGIPART1, 0x00); if ((reg & 0x50) == 0x10) doit = 0; } else { XGIfb_crt1off = 0; } reg = xgifb_reg_get(XGICR, 0x17); if ((XGIfb_crt1off) && (doit)) reg &= ~0x80; else reg |= 0x80; xgifb_reg_set(XGICR, 0x17, reg); xgifb_reg_and(XGISR, IND_SIS_RAMDAC_CONTROL, ~0x04); if (xgifb_info->display2 == XGIFB_DISP_TV && xgifb_info->hasVB == HASVB_301) { reg = xgifb_reg_get(XGIPART4, 0x01); if (reg < 0xB0) { /* Set filter for XGI301 */ int filter_tb; switch (xgifb_info->video_width) { case 320: filter_tb = (xgifb_info->TV_type == TVMODE_NTSC) ? 4 : 12; break; case 640: filter_tb = (xgifb_info->TV_type == TVMODE_NTSC) ? 5 : 13; break; case 720: filter_tb = (xgifb_info->TV_type == TVMODE_NTSC) ? 6 : 14; break; case 800: filter_tb = (xgifb_info->TV_type == TVMODE_NTSC) ? 
7 : 15; break; default: filter_tb = 0; filter = -1; break; } xgifb_reg_or(XGIPART1, SIS_CRT2_WENABLE_315, 0x01); if (xgifb_info->TV_type == TVMODE_NTSC) { xgifb_reg_and(XGIPART2, 0x3a, 0x1f); if (xgifb_info->TV_plug == TVPLUG_SVIDEO) { xgifb_reg_and(XGIPART2, 0x30, 0xdf); } else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE) { xgifb_reg_or(XGIPART2, 0x30, 0x20); switch (xgifb_info->video_width) { case 640: xgifb_reg_set(XGIPART2, 0x35, 0xEB); xgifb_reg_set(XGIPART2, 0x36, 0x04); xgifb_reg_set(XGIPART2, 0x37, 0x25); xgifb_reg_set(XGIPART2, 0x38, 0x18); break; case 720: xgifb_reg_set(XGIPART2, 0x35, 0xEE); xgifb_reg_set(XGIPART2, 0x36, 0x0C); xgifb_reg_set(XGIPART2, 0x37, 0x22); xgifb_reg_set(XGIPART2, 0x38, 0x08); break; case 800: xgifb_reg_set(XGIPART2, 0x35, 0xEB); xgifb_reg_set(XGIPART2, 0x36, 0x15); xgifb_reg_set(XGIPART2, 0x37, 0x25); xgifb_reg_set(XGIPART2, 0x38, 0xF6); break; } } } else if (xgifb_info->TV_type == TVMODE_PAL) { xgifb_reg_and(XGIPART2, 0x3A, 0x1F); if (xgifb_info->TV_plug == TVPLUG_SVIDEO) { xgifb_reg_and(XGIPART2, 0x30, 0xDF); } else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE) { xgifb_reg_or(XGIPART2, 0x30, 0x20); switch (xgifb_info->video_width) { case 640: xgifb_reg_set(XGIPART2, 0x35, 0xF1); xgifb_reg_set(XGIPART2, 0x36, 0xF7); xgifb_reg_set(XGIPART2, 0x37, 0x1F); xgifb_reg_set(XGIPART2, 0x38, 0x32); break; case 720: xgifb_reg_set(XGIPART2, 0x35, 0xF3); xgifb_reg_set(XGIPART2, 0x36, 0x00); xgifb_reg_set(XGIPART2, 0x37, 0x1D); xgifb_reg_set(XGIPART2, 0x38, 0x20); break; case 800: xgifb_reg_set(XGIPART2, 0x35, 0xFC); xgifb_reg_set(XGIPART2, 0x36, 0xFB); xgifb_reg_set(XGIPART2, 0x37, 0x14); xgifb_reg_set(XGIPART2, 0x38, 0x2A); break; } } } if ((filter >= 0) && (filter <= 7)) { DPRINTK("FilterTable[%d]-%d: %02x %02x %02x %02x\n", filter_tb, filter, XGI_TV_filter[filter_tb]. filter[filter][0], XGI_TV_filter[filter_tb]. filter[filter][1], XGI_TV_filter[filter_tb]. filter[filter][2], XGI_TV_filter[filter_tb]. 
filter[filter][3] ); xgifb_reg_set( XGIPART2, 0x35, (XGI_TV_filter[filter_tb]. filter[filter][0])); xgifb_reg_set( XGIPART2, 0x36, (XGI_TV_filter[filter_tb]. filter[filter][1])); xgifb_reg_set( XGIPART2, 0x37, (XGI_TV_filter[filter_tb]. filter[filter][2])); xgifb_reg_set( XGIPART2, 0x38, (XGI_TV_filter[filter_tb]. filter[filter][3])); } } } } static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info; unsigned int htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len; unsigned int vtotal = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; #if defined(__powerpc__) u8 sr_data, cr_data; #endif unsigned int drate = 0, hrate = 0; int found_mode = 0; int old_mode; /* unsigned char reg, reg1; */ DEBUGPRN("Inside do_set_var"); /* printk(KERN_DEBUG "XGIfb:var->yres=%d, var->upper_margin=%d, var->lower_margin=%d, var->vsync_len=%d\n", var->yres, var->upper_margin, var->lower_margin, var->vsync_len); */ info->var.xres_virtual = var->xres_virtual; info->var.yres_virtual = var->yres_virtual; info->var.bits_per_pixel = var->bits_per_pixel; if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) vtotal <<= 1; else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) vtotal <<= 2; else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { /* vtotal <<= 1; */ /* var->yres <<= 1; */ } if (!htotal || !vtotal) { DPRINTK("XGIfb: Invalid 'var' information\n"); return -EINVAL; } pr_debug("var->pixclock=%d, htotal=%d, vtotal=%d\n", var->pixclock, htotal, vtotal); if (var->pixclock && htotal && vtotal) { drate = 1000000000 / var->pixclock; hrate = (drate * 1000) / htotal; xgifb_info->refresh_rate = (unsigned int) (hrate * 2 / vtotal); } else { xgifb_info->refresh_rate = 60; } pr_debug("Change mode to %dx%dx%d-%dHz\n", var->xres, var->yres, var->bits_per_pixel, xgifb_info->refresh_rate); old_mode = 
xgifb_info->mode_idx; xgifb_info->mode_idx = 0; while ((XGIbios_mode[xgifb_info->mode_idx].mode_no != 0) && (XGIbios_mode[xgifb_info->mode_idx].xres <= var->xres)) { if ((XGIbios_mode[xgifb_info->mode_idx].xres == var->xres) && (XGIbios_mode[xgifb_info->mode_idx].yres == var->yres) && (XGIbios_mode[xgifb_info->mode_idx].bpp == var->bits_per_pixel)) { found_mode = 1; break; } xgifb_info->mode_idx++; } if (found_mode) xgifb_info->mode_idx = XGIfb_validate_mode(xgifb_info, xgifb_info->mode_idx); else xgifb_info->mode_idx = -1; if (xgifb_info->mode_idx < 0) { pr_err("Mode %dx%dx%d not supported\n", var->xres, var->yres, var->bits_per_pixel); xgifb_info->mode_idx = old_mode; return -EINVAL; } if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) { xgifb_info->rate_idx = XGIbios_mode[xgifb_info->mode_idx].rate_idx; xgifb_info->refresh_rate = 60; } if (isactive) { XGIfb_pre_setmode(xgifb_info); if (XGISetModeNew(xgifb_info, hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no) == 0) { pr_err("Setting mode[0x%x] failed\n", XGIbios_mode[xgifb_info->mode_idx].mode_no); return -EINVAL; } info->fix.line_length = ((info->var.xres_virtual * info->var.bits_per_pixel) >> 6); xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD); xgifb_reg_set(XGICR, 0x13, (info->fix.line_length & 0x00ff)); xgifb_reg_set(XGISR, 0x0E, (info->fix.line_length & 0xff00) >> 8); XGIfb_post_setmode(xgifb_info); DPRINTK("XGIfb: Set new mode: %dx%dx%d-%d\n", XGIbios_mode[xgifb_info->mode_idx].xres, XGIbios_mode[xgifb_info->mode_idx].yres, XGIbios_mode[xgifb_info->mode_idx].bpp, xgifb_info->refresh_rate); xgifb_info->video_bpp = XGIbios_mode[xgifb_info->mode_idx].bpp; xgifb_info->video_vwidth = info->var.xres_virtual; xgifb_info->video_width = XGIbios_mode[xgifb_info->mode_idx].xres; xgifb_info->video_vheight = info->var.yres_virtual; xgifb_info->video_height = XGIbios_mode[xgifb_info->mode_idx].yres; xgifb_info->org_x = xgifb_info->org_y = 0; xgifb_info->video_linelength = 
info->var.xres_virtual * (xgifb_info->video_bpp >> 3); switch (xgifb_info->video_bpp) { case 8: xgifb_info->DstColor = 0x0000; xgifb_info->XGI310_AccelDepth = 0x00000000; xgifb_info->video_cmap_len = 256; #if defined(__powerpc__) cr_data = xgifb_reg_get(XGICR, 0x4D); xgifb_reg_set(XGICR, 0x4D, (cr_data & 0xE0)); #endif break; case 16: xgifb_info->DstColor = 0x8000; xgifb_info->XGI310_AccelDepth = 0x00010000; #if defined(__powerpc__) cr_data = xgifb_reg_get(XGICR, 0x4D); xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x0B)); #endif xgifb_info->video_cmap_len = 16; break; case 32: xgifb_info->DstColor = 0xC000; xgifb_info->XGI310_AccelDepth = 0x00020000; xgifb_info->video_cmap_len = 16; #if defined(__powerpc__) cr_data = xgifb_reg_get(XGICR, 0x4D); xgifb_reg_set(XGICR, 0x4D, ((cr_data & 0xE0) | 0x15)); #endif break; default: xgifb_info->video_cmap_len = 16; pr_err("Unsupported depth %d", xgifb_info->video_bpp); break; } } XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/ DEBUGPRN("End of do_set_var"); dumpVGAReg(); return 0; } static int XGIfb_pan_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; unsigned int base; /* printk("Inside pan_var"); */ base = var->yoffset * info->var.xres_virtual + var->xoffset; /* calculate base bpp dep. 
*/ switch (info->var.bits_per_pixel) { case 16: base >>= 1; break; case 32: break; case 8: default: base >>= 2; break; } xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD); xgifb_reg_set(XGICR, 0x0D, base & 0xFF); xgifb_reg_set(XGICR, 0x0C, (base >> 8) & 0xFF); xgifb_reg_set(XGISR, 0x0D, (base >> 16) & 0xFF); xgifb_reg_set(XGISR, 0x37, (base >> 24) & 0x03); xgifb_reg_and_or(XGISR, 0x37, 0xDF, (base >> 21) & 0x04); if (xgifb_info->display2 != XGIFB_DISP_NONE) { xgifb_reg_or(XGIPART1, SIS_CRT2_WENABLE_315, 0x01); xgifb_reg_set(XGIPART1, 0x06, (base & 0xFF)); xgifb_reg_set(XGIPART1, 0x05, ((base >> 8) & 0xFF)); xgifb_reg_set(XGIPART1, 0x04, ((base >> 16) & 0xFF)); xgifb_reg_and_or(XGIPART1, 0x02, 0x7F, ((base >> 24) & 0x01) << 7); } /* printk("End of pan_var"); */ return 0; } static int XGIfb_open(struct fb_info *info, int user) { return 0; } static int XGIfb_release(struct fb_info *info, int user) { return 0; } static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var) { int rc = 16; switch (var->bits_per_pixel) { case 8: rc = 256; break; case 16: rc = 16; break; case 32: rc = 16; break; } return rc; } static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; if (regno >= XGIfb_get_cmap_len(&info->var)) return 1; switch (info->var.bits_per_pixel) { case 8: outb(regno, XGIDACA); outb((red >> 10), XGIDACD); outb((green >> 10), XGIDACD); outb((blue >> 10), XGIDACD); if (xgifb_info->display2 != XGIFB_DISP_NONE) { outb(regno, XGIDAC2A); outb((red >> 8), XGIDAC2D); outb((green >> 8), XGIDAC2D); outb((blue >> 8), XGIDAC2D); } break; case 16: ((u32 *) (info->pseudo_palette))[regno] = ((red & 0xf800)) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); break; case 32: red >>= 8; green >>= 8; blue >>= 8; ((u32 *) (info->pseudo_palette))[regno] = (red << 16) | (green << 8) | (blue); break; } return 0; } /* ----------- FBDev related routines for all 
series ---------- */ static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con, struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; DEBUGPRN("inside get_fix"); memset(fix, 0, sizeof(struct fb_fix_screeninfo)); fix->smem_start = xgifb_info->video_base; fix->smem_len = xgifb_info->video_size; fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; if (xgifb_info->video_bpp == 8) fix->visual = FB_VISUAL_PSEUDOCOLOR; else fix->visual = FB_VISUAL_DIRECTCOLOR; fix->xpanstep = 0; if (XGIfb_ypan) fix->ypanstep = 1; fix->ywrapstep = 0; fix->line_length = xgifb_info->video_linelength; fix->mmio_start = xgifb_info->mmio_base; fix->mmio_len = xgifb_info->mmio_size; fix->accel = FB_ACCEL_SIS_XABRE; DEBUGPRN("end of get_fix"); return 0; } static int XGIfb_set_par(struct fb_info *info) { int err; /* printk("XGIfb: inside set_par\n"); */ err = XGIfb_do_set_var(&info->var, 1, info); if (err) return err; XGIfb_get_fix(&info->fix, -1, info); /* printk("XGIfb: end of set_par\n"); */ return 0; } static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; unsigned int htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len; unsigned int vtotal = 0; unsigned int drate = 0, hrate = 0; int found_mode = 0; int refresh_rate, search_idx; DEBUGPRN("Inside check_var"); if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) { vtotal = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; vtotal <<= 1; } else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) { vtotal = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; vtotal <<= 2; } else if ((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { vtotal = var->upper_margin + (var->yres / 2) + var->lower_margin + var->vsync_len; } else vtotal = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; if (!(htotal) || !(vtotal)) XGIFAIL("XGIfb: no valid timing data"); if (var->pixclock 
&& htotal && vtotal) { drate = 1000000000 / var->pixclock; hrate = (drate * 1000) / htotal; xgifb_info->refresh_rate = (unsigned int) (hrate * 2 / vtotal); pr_debug( "%s: pixclock = %d ,htotal=%d, vtotal=%d\n" "%s: drate=%d, hrate=%d, refresh_rate=%d\n", __func__, var->pixclock, htotal, vtotal, __func__, drate, hrate, xgifb_info->refresh_rate); } else { xgifb_info->refresh_rate = 60; } /* if ((var->pixclock) && (htotal)) { drate = 1E12 / var->pixclock; hrate = drate / htotal; refresh_rate = (unsigned int) (hrate / vtotal * 2 + 0.5); } else { refresh_rate = 60; } */ /* TW: Calculation wrong for 1024x600 - force it to 60Hz */ if ((var->xres == 1024) && (var->yres == 600)) refresh_rate = 60; search_idx = 0; while ((XGIbios_mode[search_idx].mode_no != 0) && (XGIbios_mode[search_idx].xres <= var->xres)) { if ((XGIbios_mode[search_idx].xres == var->xres) && (XGIbios_mode[search_idx].yres == var->yres) && (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) { if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) { found_mode = 1; break; } } search_idx++; } if (!found_mode) { pr_err("%dx%dx%d is no valid mode\n", var->xres, var->yres, var->bits_per_pixel); search_idx = 0; while (XGIbios_mode[search_idx].mode_no != 0) { if ((var->xres <= XGIbios_mode[search_idx].xres) && (var->yres <= XGIbios_mode[search_idx].yres) && (var->bits_per_pixel == XGIbios_mode[search_idx].bpp)) { if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) { found_mode = 1; break; } } search_idx++; } if (found_mode) { var->xres = XGIbios_mode[search_idx].xres; var->yres = XGIbios_mode[search_idx].yres; pr_debug("Adapted to mode %dx%dx%d\n", var->xres, var->yres, var->bits_per_pixel); } else { pr_err("Failed to find similar mode to %dx%dx%d\n", var->xres, var->yres, var->bits_per_pixel); return -EINVAL; } } /* TW: TODO: Check the refresh rate */ /* Adapt RGB settings */ XGIfb_bpp_to_var(xgifb_info, var); /* Sanity check for offsets */ if (var->xoffset < 0) var->xoffset = 0; if (var->yoffset < 0) 
var->yoffset = 0; if (!XGIfb_ypan) { if (var->xres != var->xres_virtual) var->xres_virtual = var->xres; if (var->yres != var->yres_virtual) var->yres_virtual = var->yres; } /* else { */ /* TW: Now patch yres_virtual if we use panning */ /* May I do this? */ /* var->yres_virtual = xgifb_info->heapstart / (var->xres * (var->bits_per_pixel >> 3)); */ /* if (var->yres_virtual <= var->yres) { */ /* TW: Paranoia check */ /* var->yres_virtual = var->yres; */ /* } */ /* } */ /* Truncate offsets to maximum if too high */ if (var->xoffset > var->xres_virtual - var->xres) var->xoffset = var->xres_virtual - var->xres - 1; if (var->yoffset > var->yres_virtual - var->yres) var->yoffset = var->yres_virtual - var->yres - 1; /* Set everything else to 0 */ var->red.msb_right = var->green.msb_right = var->blue.msb_right = var->transp.offset = var->transp.length = var->transp.msb_right = 0; DEBUGPRN("end of check_var"); return 0; } static int XGIfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { int err; /* printk("\nInside pan_display:\n"); */ if (var->xoffset > (info->var.xres_virtual - info->var.xres)) return -EINVAL; if (var->yoffset > (info->var.yres_virtual - info->var.yres)) return -EINVAL; if (var->vmode & FB_VMODE_YWRAP) { if (var->yoffset < 0 || var->yoffset >= info->var.yres_virtual || var->xoffset) return -EINVAL; } else { if (var->xoffset + info->var.xres > info->var.xres_virtual || var->yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; } err = XGIfb_pan_var(var, info); if (err < 0) return err; info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; if (var->vmode & FB_VMODE_YWRAP) info->var.vmode |= FB_VMODE_YWRAP; else info->var.vmode &= ~FB_VMODE_YWRAP; /* printk("End of pan_display\n"); */ return 0; } static int XGIfb_blank(int blank, struct fb_info *info) { struct xgifb_video_info *xgifb_info = info->par; u8 reg; reg = xgifb_reg_get(XGICR, 0x17); if (blank > 0) reg &= 0x7f; else reg |= 0x80; xgifb_reg_set(XGICR, 0x17, 
reg); xgifb_reg_set(XGISR, 0x00, 0x01); /* Synchronous Reset */ xgifb_reg_set(XGISR, 0x00, 0x03); /* End Reset */ return 0; } static struct fb_ops XGIfb_ops = { .owner = THIS_MODULE, .fb_open = XGIfb_open, .fb_release = XGIfb_release, .fb_check_var = XGIfb_check_var, .fb_set_par = XGIfb_set_par, .fb_setcolreg = XGIfb_setcolreg, .fb_pan_display = XGIfb_pan_display, .fb_blank = XGIfb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, /* .fb_mmap = XGIfb_mmap, */ }; /* ---------------- Chip generation dependent routines ---------------- */ /* for XGI 315/550/650/740/330 */ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info) { u8 ChannelNum, tmp; u8 reg = 0; /* xorg driver sets 32MB * 1 channel */ if (xgifb_info->chip == XG27) xgifb_reg_set(XGISR, IND_SIS_DRAM_SIZE, 0x51); reg = xgifb_reg_get(XGISR, IND_SIS_DRAM_SIZE); switch ((reg & XGI_DRAM_SIZE_MASK) >> 4) { case XGI_DRAM_SIZE_1MB: xgifb_info->video_size = 0x100000; break; case XGI_DRAM_SIZE_2MB: xgifb_info->video_size = 0x200000; break; case XGI_DRAM_SIZE_4MB: xgifb_info->video_size = 0x400000; break; case XGI_DRAM_SIZE_8MB: xgifb_info->video_size = 0x800000; break; case XGI_DRAM_SIZE_16MB: xgifb_info->video_size = 0x1000000; break; case XGI_DRAM_SIZE_32MB: xgifb_info->video_size = 0x2000000; break; case XGI_DRAM_SIZE_64MB: xgifb_info->video_size = 0x4000000; break; case XGI_DRAM_SIZE_128MB: xgifb_info->video_size = 0x8000000; break; case XGI_DRAM_SIZE_256MB: xgifb_info->video_size = 0x10000000; break; default: return -1; } tmp = (reg & 0x0c) >> 2; switch (xgifb_info->chip) { case XG20: case XG21: case XG27: ChannelNum = 1; break; case XG42: if (reg & 0x04) ChannelNum = 2; else ChannelNum = 1; break; case XG45: if (tmp == 1) ChannelNum = 2; else if (tmp == 2) ChannelNum = 3; else if (tmp == 3) ChannelNum = 4; else ChannelNum = 1; break; case XG40: default: if (tmp == 2) ChannelNum = 2; else if (tmp == 3) ChannelNum = 3; else ChannelNum = 1; break; } 
xgifb_info->video_size = xgifb_info->video_size * ChannelNum; /* PLiad fixed for benchmarking and fb set */ /* xgifb_info->video_size = 0x200000; */ /* 1024x768x16 */ /* xgifb_info->video_size = 0x1000000; */ /* benchmark */ pr_info("SR14=%x DramSzie %x ChannelNum %x\n", reg, xgifb_info->video_size, ChannelNum); return 0; } static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info) { u8 cr32, temp = 0; xgifb_info->TV_plug = xgifb_info->TV_type = 0; switch (xgifb_info->hasVB) { case HASVB_LVDS_CHRONTEL: case HASVB_CHRONTEL: break; case HASVB_301: case HASVB_302: /* XGI_Sense30x(); */ /* Yi-Lin TV Sense? */ break; } cr32 = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR32); if ((cr32 & SIS_CRT1) && !XGIfb_crt1off) XGIfb_crt1off = 0; else { if (cr32 & 0x5F) XGIfb_crt1off = 1; else XGIfb_crt1off = 0; } if (!xgifb_info->display2_force) { if (cr32 & SIS_VB_TV) xgifb_info->display2 = XGIFB_DISP_TV; else if (cr32 & SIS_VB_LCD) xgifb_info->display2 = XGIFB_DISP_LCD; else if (cr32 & SIS_VB_CRT2) xgifb_info->display2 = XGIFB_DISP_CRT; else xgifb_info->display2 = XGIFB_DISP_NONE; } if (XGIfb_tvplug != -1) /* PR/TW: Override with option */ xgifb_info->TV_plug = XGIfb_tvplug; else if (cr32 & SIS_VB_HIVISION) { xgifb_info->TV_type = TVMODE_HIVISION; xgifb_info->TV_plug = TVPLUG_SVIDEO; } else if (cr32 & SIS_VB_SVIDEO) xgifb_info->TV_plug = TVPLUG_SVIDEO; else if (cr32 & SIS_VB_COMPOSITE) xgifb_info->TV_plug = TVPLUG_COMPOSITE; else if (cr32 & SIS_VB_SCART) xgifb_info->TV_plug = TVPLUG_SCART; if (xgifb_info->TV_type == 0) { temp = xgifb_reg_get(XGICR, 0x38); if (temp & 0x10) xgifb_info->TV_type = TVMODE_PAL; else xgifb_info->TV_type = TVMODE_NTSC; } /* TW: Copy forceCRT1 option to CRT1off if option is given */ if (XGIfb_forcecrt1 != -1) { if (XGIfb_forcecrt1) XGIfb_crt1off = 0; else XGIfb_crt1off = 1; } } static int XGIfb_has_VB(struct xgifb_video_info *xgifb_info) { u8 vb_chipid; vb_chipid = xgifb_reg_get(XGIPART4, 0x00); switch (vb_chipid) { case 0x01: xgifb_info->hasVB = 
HASVB_301; break; case 0x02: xgifb_info->hasVB = HASVB_302; break; default: xgifb_info->hasVB = HASVB_NONE; return 0; } return 1; } static void XGIfb_get_VB_type(struct xgifb_video_info *xgifb_info) { u8 reg; if (!XGIfb_has_VB(xgifb_info)) { reg = xgifb_reg_get(XGICR, IND_XGI_SCRATCH_REG_CR37); switch ((reg & SIS_EXTERNAL_CHIP_MASK) >> 1) { case SIS_EXTERNAL_CHIP_LVDS: xgifb_info->hasVB = HASVB_LVDS; break; case SIS_EXTERNAL_CHIP_LVDS_CHRONTEL: xgifb_info->hasVB = HASVB_LVDS_CHRONTEL; break; default: break; } } } static int __init xgifb_optval(char *fullopt, int validx) { unsigned long lres; if (kstrtoul(fullopt + validx, 0, &lres) < 0 || lres > INT_MAX) { pr_err("xgifb: invalid value for option: %s\n", fullopt); return 0; } return lres; } static int __init XGIfb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; pr_info("xgifb: options: %s\n", options); while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (!strncmp(this_opt, "mode:", 5)) { mode = this_opt + 5; } else if (!strncmp(this_opt, "vesa:", 5)) { vesa = xgifb_optval(this_opt, 5); } else if (!strncmp(this_opt, "vrate:", 6)) { refresh_rate = xgifb_optval(this_opt, 6); } else if (!strncmp(this_opt, "rate:", 5)) { refresh_rate = xgifb_optval(this_opt, 5); } else if (!strncmp(this_opt, "crt1off", 7)) { XGIfb_crt1off = 1; } else if (!strncmp(this_opt, "filter:", 7)) { filter = xgifb_optval(this_opt, 7); } else if (!strncmp(this_opt, "forcecrt2type:", 14)) { XGIfb_search_crt2type(this_opt + 14); } else if (!strncmp(this_opt, "forcecrt1:", 10)) { XGIfb_forcecrt1 = xgifb_optval(this_opt, 10); } else if (!strncmp(this_opt, "tvmode:", 7)) { XGIfb_search_tvstd(this_opt + 7); } else if (!strncmp(this_opt, "tvstandard:", 11)) { XGIfb_search_tvstd(this_opt + 7); } else if (!strncmp(this_opt, "dstn", 4)) { enable_dstn = 1; /* TW: DSTN overrules forcecrt2type */ XGIfb_crt2type = XGIFB_DISP_LCD; } else if (!strncmp(this_opt, "noypan", 6)) { XGIfb_ypan = 0; } else { mode 
= this_opt; } } return 0; } static int __devinit xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { u8 reg, reg1; u8 CR48, CR38; int ret; struct fb_info *fb_info; struct xgifb_video_info *xgifb_info; struct xgi_hw_device_info *hw_info; fb_info = framebuffer_alloc(sizeof(*xgifb_info), &pdev->dev); if (!fb_info) return -ENOMEM; xgifb_info = fb_info->par; hw_info = &xgifb_info->hw_info; xgifb_info->fb_info = fb_info; xgifb_info->chip_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &xgifb_info->revision_id); hw_info->jChipRevision = xgifb_info->revision_id; xgifb_info->pcibus = pdev->bus->number; xgifb_info->pcislot = PCI_SLOT(pdev->devfn); xgifb_info->pcifunc = PCI_FUNC(pdev->devfn); xgifb_info->subsysvendor = pdev->subsystem_vendor; xgifb_info->subsysdevice = pdev->subsystem_device; xgifb_info->video_base = pci_resource_start(pdev, 0); xgifb_info->mmio_base = pci_resource_start(pdev, 1); xgifb_info->mmio_size = pci_resource_len(pdev, 1); xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30; hw_info->pjIOAddress = (unsigned char *)xgifb_info->vga_base; /* XGI_Pr.RelIO = ioremap(pci_resource_start(pdev, 2), 128) + 0x30; */ pr_info("Relocate IO address: %lx [%08lx]\n", (unsigned long)pci_resource_start(pdev, 2), xgifb_info->dev_info.RelIO); if (pci_enable_device(pdev)) { ret = -EIO; goto error; } if (XGIfb_crt2type != -1) { xgifb_info->display2 = XGIfb_crt2type; xgifb_info->display2_force = true; } XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress); xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD); reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD); if (reg1 != 0xa1) { /*I/O error */ pr_err("I/O error!!!"); ret = -EIO; goto error; } switch (xgifb_info->chip_id) { case PCI_DEVICE_ID_XGI_20: xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN); CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1); if (CR48&GPIOG_READ) xgifb_info->chip = XG21; else xgifb_info->chip = XG20; break; case PCI_DEVICE_ID_XGI_40: xgifb_info->chip = 
XG40; break; case PCI_DEVICE_ID_XGI_41: xgifb_info->chip = XG41; break; case PCI_DEVICE_ID_XGI_42: xgifb_info->chip = XG42; break; case PCI_DEVICE_ID_XGI_27: xgifb_info->chip = XG27; break; default: ret = -ENODEV; goto error; } pr_info("chipid = %x\n", xgifb_info->chip); hw_info->jChipType = xgifb_info->chip; if (XGIfb_get_dram_size(xgifb_info)) { pr_err("Fatal error: Unable to determine RAM size.\n"); ret = -ENODEV; goto error; } /* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */ xgifb_reg_or(XGISR, IND_SIS_PCI_ADDRESS_SET, (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE)); /* Enable 2D accelerator engine */ xgifb_reg_or(XGISR, IND_SIS_MODULE_ENABLE, SIS_ENABLE_2D); hw_info->ulVideoMemorySize = xgifb_info->video_size; if (!request_mem_region(xgifb_info->video_base, xgifb_info->video_size, "XGIfb FB")) { pr_err("unable request memory size %x\n", xgifb_info->video_size); pr_err("Fatal error: Unable to reserve frame buffer memory\n"); pr_err("Is there another framebuffer driver active?\n"); ret = -ENODEV; goto error; } if (!request_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size, "XGIfb MMIO")) { pr_err("Fatal error: Unable to reserve MMIO region\n"); ret = -ENODEV; goto error_0; } xgifb_info->video_vbase = hw_info->pjVideoMemoryAddress = ioremap(xgifb_info->video_base, xgifb_info->video_size); xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base, xgifb_info->mmio_size); pr_info("Framebuffer at 0x%lx, mapped to 0x%p, size %dk\n", xgifb_info->video_base, xgifb_info->video_vbase, xgifb_info->video_size / 1024); pr_info("MMIO at 0x%lx, mapped to 0x%p, size %ldk\n", xgifb_info->mmio_base, xgifb_info->mmio_vbase, xgifb_info->mmio_size / 1024); pci_set_drvdata(pdev, xgifb_info); if (!XGIInitNew(pdev)) pr_err("XGIInitNew() failed!\n"); xgifb_info->mtrr = (unsigned int) 0; xgifb_info->hasVB = HASVB_NONE; if ((xgifb_info->chip == XG20) || (xgifb_info->chip == XG27)) { xgifb_info->hasVB = HASVB_NONE; } else if (xgifb_info->chip == XG21) { CR38 = xgifb_reg_get(XGICR, 
0x38); if ((CR38&0xE0) == 0xC0) xgifb_info->display2 = XGIFB_DISP_LCD; else if ((CR38&0xE0) == 0x60) xgifb_info->hasVB = HASVB_CHRONTEL; else xgifb_info->hasVB = HASVB_NONE; } else { XGIfb_get_VB_type(xgifb_info); } hw_info->ujVBChipID = VB_CHIP_UNKNOWN; hw_info->ulExternalChip = 0; switch (xgifb_info->hasVB) { case HASVB_301: reg = xgifb_reg_get(XGIPART4, 0x01); if (reg >= 0xE0) { hw_info->ujVBChipID = VB_CHIP_302LV; pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg); } else if (reg >= 0xD0) { hw_info->ujVBChipID = VB_CHIP_301LV; pr_info("XGI301LV bridge detected (revision 0x%02x)\n", reg); } /* else if (reg >= 0xB0) { hw_info->ujVBChipID = VB_CHIP_301B; reg1 = xgifb_reg_get(XGIPART4, 0x23); printk("XGIfb: XGI301B bridge detected\n"); } */ else { hw_info->ujVBChipID = VB_CHIP_301; pr_info("XGI301 bridge detected\n"); } break; case HASVB_302: reg = xgifb_reg_get(XGIPART4, 0x01); if (reg >= 0xE0) { hw_info->ujVBChipID = VB_CHIP_302LV; pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg); } else if (reg >= 0xD0) { hw_info->ujVBChipID = VB_CHIP_301LV; pr_info("XGI302LV bridge detected (revision 0x%02x)\n", reg); } else if (reg >= 0xB0) { reg1 = xgifb_reg_get(XGIPART4, 0x23); hw_info->ujVBChipID = VB_CHIP_302B; } else { hw_info->ujVBChipID = VB_CHIP_302; pr_info("XGI302 bridge detected\n"); } break; case HASVB_LVDS: hw_info->ulExternalChip = 0x1; pr_info("LVDS transmitter detected\n"); break; case HASVB_TRUMPION: hw_info->ulExternalChip = 0x2; pr_info("Trumpion Zurac LVDS scaler detected\n"); break; case HASVB_CHRONTEL: hw_info->ulExternalChip = 0x4; pr_info("Chrontel TV encoder detected\n"); break; case HASVB_LVDS_CHRONTEL: hw_info->ulExternalChip = 0x5; pr_info("LVDS transmitter and Chrontel TV encoder detected\n"); break; default: pr_info("No or unknown bridge type detected\n"); break; } if (xgifb_info->hasVB != HASVB_NONE) XGIfb_detect_VB(xgifb_info); else if (xgifb_info->chip != XG21) xgifb_info->display2 = XGIFB_DISP_NONE; if 
(xgifb_info->display2 == XGIFB_DISP_LCD) { if (!enable_dstn) { reg = xgifb_reg_get(XGICR, IND_XGI_LCD_PANEL); reg &= 0x0f; hw_info->ulCRT2LCDType = XGI310paneltype[reg]; } } if ((hw_info->ujVBChipID == VB_CHIP_302B) || (hw_info->ujVBChipID == VB_CHIP_301LV) || (hw_info->ujVBChipID == VB_CHIP_302LV)) { int tmp; tmp = xgifb_reg_get(XGICR, 0x34); if (tmp <= 0x13) { /* Currently on LCDA? *(Some BIOSes leave CR38) */ tmp = xgifb_reg_get(XGICR, 0x38); if ((tmp & 0x03) == 0x03) { /* XGI_Pr.XGI_UseLCDA = 1; */ } else { /* Currently on LCDA? *(Some newer BIOSes set D0 in CR35) */ tmp = xgifb_reg_get(XGICR, 0x35); if (tmp & 0x01) { /* XGI_Pr.XGI_UseLCDA = 1; */ } else { tmp = xgifb_reg_get(XGICR, 0x30); if (tmp & 0x20) { tmp = xgifb_reg_get( XGIPART1, 0x13); } } } } } xgifb_info->mode_idx = -1; if (mode) XGIfb_search_mode(xgifb_info, mode); else if (vesa != -1) XGIfb_search_vesamode(xgifb_info, vesa); if (xgifb_info->mode_idx >= 0) xgifb_info->mode_idx = XGIfb_validate_mode(xgifb_info, xgifb_info->mode_idx); if (xgifb_info->mode_idx < 0) { if (xgifb_info->display2 == XGIFB_DISP_LCD && xgifb_info->chip == XG21) xgifb_info->mode_idx = XGIfb_GetXG21DefaultLVDSModeIdx(xgifb_info); else xgifb_info->mode_idx = DEFAULT_MODE; } if (xgifb_info->mode_idx < 0) { dev_err(&pdev->dev, "no supported video mode found\n"); goto error_1; } /* yilin set default refresh rate */ xgifb_info->refresh_rate = refresh_rate; if (xgifb_info->refresh_rate == 0) xgifb_info->refresh_rate = 60; if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) { xgifb_info->rate_idx = XGIbios_mode[xgifb_info->mode_idx].rate_idx; xgifb_info->refresh_rate = 60; } xgifb_info->video_bpp = XGIbios_mode[xgifb_info->mode_idx].bpp; xgifb_info->video_vwidth = xgifb_info->video_width = XGIbios_mode[xgifb_info->mode_idx].xres; xgifb_info->video_vheight = xgifb_info->video_height = XGIbios_mode[xgifb_info->mode_idx].yres; xgifb_info->org_x = xgifb_info->org_y = 0; xgifb_info->video_linelength = 
xgifb_info->video_width * (xgifb_info->video_bpp >> 3); switch (xgifb_info->video_bpp) { case 8: xgifb_info->DstColor = 0x0000; xgifb_info->XGI310_AccelDepth = 0x00000000; xgifb_info->video_cmap_len = 256; break; case 16: xgifb_info->DstColor = 0x8000; xgifb_info->XGI310_AccelDepth = 0x00010000; xgifb_info->video_cmap_len = 16; break; case 32: xgifb_info->DstColor = 0xC000; xgifb_info->XGI310_AccelDepth = 0x00020000; xgifb_info->video_cmap_len = 16; break; default: xgifb_info->video_cmap_len = 16; pr_info("Unsupported depth %d\n", xgifb_info->video_bpp); break; } pr_info("Default mode is %dx%dx%d (%dHz)\n", xgifb_info->video_width, xgifb_info->video_height, xgifb_info->video_bpp, xgifb_info->refresh_rate); fb_info->var.red.length = 8; fb_info->var.green.length = 8; fb_info->var.blue.length = 8; fb_info->var.activate = FB_ACTIVATE_NOW; fb_info->var.height = -1; fb_info->var.width = -1; fb_info->var.vmode = FB_VMODE_NONINTERLACED; fb_info->var.xres = xgifb_info->video_width; fb_info->var.xres_virtual = xgifb_info->video_width; fb_info->var.yres = xgifb_info->video_height; fb_info->var.yres_virtual = xgifb_info->video_height; fb_info->var.bits_per_pixel = xgifb_info->video_bpp; XGIfb_bpp_to_var(xgifb_info, &fb_info->var); fb_info->var.pixclock = (u32) (1000000000 / XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info, hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no, xgifb_info->rate_idx)); if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no, xgifb_info->rate_idx, &fb_info->var.left_margin, &fb_info->var.right_margin, &fb_info->var.upper_margin, &fb_info->var.lower_margin, &fb_info->var.hsync_len, &fb_info->var.vsync_len, &fb_info->var.sync, &fb_info->var.vmode)) { if ((fb_info->var.vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) { fb_info->var.yres <<= 1; fb_info->var.yres_virtual <<= 1; } else if ((fb_info->var.vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) { fb_info->var.pixclock >>= 1; fb_info->var.yres >>= 1; 
fb_info->var.yres_virtual >>= 1; } } strncpy(fb_info->fix.id, "XGI", sizeof(fb_info->fix.id) - 1); fb_info->fix.type = FB_TYPE_PACKED_PIXELS; fb_info->fix.xpanstep = 1; fb_info->fix.ypanstep = 1; fb_info->flags = FBINFO_FLAG_DEFAULT; fb_info->screen_base = xgifb_info->video_vbase; fb_info->fbops = &XGIfb_ops; XGIfb_get_fix(&fb_info->fix, -1, fb_info); fb_info->pseudo_palette = xgifb_info->pseudo_palette; fb_alloc_cmap(&fb_info->cmap, 256 , 0); #ifdef CONFIG_MTRR xgifb_info->mtrr = mtrr_add(xgifb_info->video_base, xgifb_info->video_size, MTRR_TYPE_WRCOMB, 1); if (xgifb_info->mtrr >= 0) dev_info(&pdev->dev, "added MTRR\n"); #endif if (register_framebuffer(fb_info) < 0) { ret = -EINVAL; goto error_mtrr; } dumpVGAReg(); return 0; error_mtrr: #ifdef CONFIG_MTRR if (xgifb_info->mtrr >= 0) mtrr_del(xgifb_info->mtrr, xgifb_info->video_base, xgifb_info->video_size); #endif /* CONFIG_MTRR */ error_1: iounmap(xgifb_info->mmio_vbase); iounmap(xgifb_info->video_vbase); release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size); error_0: release_mem_region(xgifb_info->video_base, xgifb_info->video_size); error: framebuffer_release(fb_info); return ret; } /*****************************************************/ /* PCI DEVICE HANDLING */ /*****************************************************/ static void __devexit xgifb_remove(struct pci_dev *pdev) { struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev); struct fb_info *fb_info = xgifb_info->fb_info; unregister_framebuffer(fb_info); #ifdef CONFIG_MTRR if (xgifb_info->mtrr >= 0) mtrr_del(xgifb_info->mtrr, xgifb_info->video_base, xgifb_info->video_size); #endif /* CONFIG_MTRR */ iounmap(xgifb_info->mmio_vbase); iounmap(xgifb_info->video_vbase); release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size); release_mem_region(xgifb_info->video_base, xgifb_info->video_size); framebuffer_release(fb_info); pci_set_drvdata(pdev, NULL); } static struct pci_driver xgifb_driver = { .name = "xgifb", .id_table = xgifb_pci_table, 
.probe = xgifb_probe, .remove = __devexit_p(xgifb_remove) }; static int __init xgifb_init(void) { char *option = NULL; if (forcecrt2type != NULL) XGIfb_search_crt2type(forcecrt2type); if (fb_get_options("xgifb", &option)) return -ENODEV; XGIfb_setup(option); return pci_register_driver(&xgifb_driver); } module_init(xgifb_init); /*****************************************************/ /* MODULE */ /*****************************************************/ #ifdef MODULE MODULE_DESCRIPTION("Z7 Z9 Z9S Z11 framebuffer device driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("XGITECH , Others"); module_param(mode, charp, 0); module_param(vesa, int, 0); module_param(filter, int, 0); module_param(forcecrt2type, charp, 0); MODULE_PARM_DESC(forcecrt2type, "\nForce the second display output type. Possible values are NONE,\n" "LCD, TV, VGA, SVIDEO or COMPOSITE.\n"); MODULE_PARM_DESC(mode, "\nSelects the desired default display mode in the format XxYxDepth,\n" "eg. 1024x768x16.\n"); MODULE_PARM_DESC(vesa, "\nSelects the desired default display mode by VESA mode number, eg.\n" "0x117.\n"); MODULE_PARM_DESC(filter, "\nSelects TV flicker filter type (only for systems with a SiS301 video bridge).\n" "(Possible values 0-7, default: [no filter])\n"); static void __exit xgifb_remove_module(void) { pci_unregister_driver(&xgifb_driver); pr_debug("Module unloaded\n"); } module_exit(xgifb_remove_module); #endif /* /MODULE */
gpl-2.0
Perseus71/KT747
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
4808
7021
/******************************************************************************* This contains the functions to handle the platform driver. Copyright (C) 2007-2011 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_net.h> #include "stmmac.h" #ifdef CONFIG_OF static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat, const char **mac) { struct device_node *np = pdev->dev.of_node; if (!np) return -ENODEV; *mac = of_get_mac_address(np); plat->interface = of_get_phy_mode(np); plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); /* * Currently only the properties needed on SPEAr600 * are provided. All other properties should be added * once needed on other platforms. 
*/ if (of_device_is_compatible(np, "st,spear600-gmac")) { plat->pbl = 8; plat->has_gmac = 1; plat->pmt = 1; } return 0; } #else static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat, const char **mac) { return -ENOSYS; } #endif /* CONFIG_OF */ /** * stmmac_pltfr_probe * @pdev: platform device pointer * Description: platform_device probe function. It allocates * the necessary resources and invokes the main to init * the net device, register the mdio bus etc. */ static int stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; void __iomem *addr = NULL; struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; const char *mac = NULL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; if (!request_mem_region(res->start, resource_size(res), pdev->name)) { pr_err("%s: ERROR: memory allocation failed" "cannot get the I/O addr 0x%x\n", __func__, (unsigned int)res->start); return -EBUSY; } addr = ioremap(res->start, resource_size(res)); if (!addr) { pr_err("%s: ERROR: memory mapping failed", __func__); ret = -ENOMEM; goto out_release_region; } if (pdev->dev.of_node) { plat_dat = devm_kzalloc(&pdev->dev, sizeof(struct plat_stmmacenet_data), GFP_KERNEL); if (!plat_dat) { pr_err("%s: ERROR: no memory", __func__); ret = -ENOMEM; goto out_unmap; } ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); if (ret) { pr_err("%s: main dt probe failed", __func__); goto out_unmap; } } else { plat_dat = pdev->dev.platform_data; } /* Custom initialisation (if needed)*/ if (plat_dat->init) { ret = plat_dat->init(pdev); if (unlikely(ret)) goto out_unmap; } priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed", __func__); goto out_unmap; } /* Get MAC address if available (DT) */ if (mac) memcpy(priv->dev->dev_addr, mac, ETH_ALEN); /* Get the MAC information */ priv->dev->irq = platform_get_irq_byname(pdev, 
"macirq"); if (priv->dev->irq == -ENXIO) { pr_err("%s: ERROR: MAC IRQ configuration " "information not found\n", __func__); ret = -ENXIO; goto out_unmap; } /* * On some platforms e.g. SPEAr the wake up irq differs from the mac irq * The external wake up irq can be passed through the platform code * named as "eth_wake_irq" * * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); if (priv->wol_irq == -ENXIO) priv->wol_irq = priv->dev->irq; platform_set_drvdata(pdev, priv->dev); pr_debug("STMMAC platform driver registration completed"); return 0; out_unmap: iounmap(addr); platform_set_drvdata(pdev, NULL); out_release_region: release_mem_region(res->start, resource_size(res)); return ret; } /** * stmmac_pltfr_remove * @pdev: platform device pointer * Description: this function calls the main to free the net resources * and calls the platforms hook and release the resources (e.g. mem). 
*/ static int stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); struct resource *res; int ret = stmmac_dvr_remove(ndev); if (priv->plat->exit) priv->plat->exit(pdev); if (priv->plat->exit) priv->plat->exit(pdev); platform_set_drvdata(pdev, NULL); iounmap((void *)priv->ioaddr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); return ret; } #ifdef CONFIG_PM static int stmmac_pltfr_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_suspend(ndev); } static int stmmac_pltfr_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_resume(ndev); } int stmmac_pltfr_freeze(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_freeze(ndev); } int stmmac_pltfr_restore(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_restore(ndev); } static const struct dev_pm_ops stmmac_pltfr_pm_ops = { .suspend = stmmac_pltfr_suspend, .resume = stmmac_pltfr_resume, .freeze = stmmac_pltfr_freeze, .thaw = stmmac_pltfr_restore, .restore = stmmac_pltfr_restore, }; #else static const struct dev_pm_ops stmmac_pltfr_pm_ops; #endif /* CONFIG_PM */ static const struct of_device_id stmmac_dt_ids[] = { { .compatible = "st,spear600-gmac", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, stmmac_dt_ids); static struct platform_driver stmmac_driver = { .probe = stmmac_pltfr_probe, .remove = stmmac_pltfr_remove, .driver = { .name = STMMAC_RESOURCE_NAME, .owner = THIS_MODULE, .pm = &stmmac_pltfr_pm_ops, .of_match_table = of_match_ptr(stmmac_dt_ids), }, }; module_platform_driver(stmmac_driver); MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Prism2/HUAWEI-U8686-Open-Source-U8686-JellyBean-kernel-3.4.0-
drivers/s390/cio/ccwgroup.c
4808
16238
/* * bus driver for ccwgroup * * Copyright IBM Corp. 2002, 2009 * * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) */ #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/device.h> #include <linux/init.h> #include <linux/ctype.h> #include <linux/dcache.h> #include <asm/ccwdev.h> #include <asm/ccwgroup.h> #define CCW_BUS_ID_SIZE 20 /* In Linux 2.4, we had a channel device layer called "chandev" * that did all sorts of obscure stuff for networking devices. * This is another driver that serves as a replacement for just * one of its functions, namely the translation of single subchannels * to devices that use multiple subchannels. */ /* a device matches a driver if all its slave devices match the same * entry of the driver */ static int ccwgroup_bus_match(struct device *dev, struct device_driver * drv) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv); if (gdev->creator_id == gdrv->driver_id) return 1; return 0; } static struct bus_type ccwgroup_bus_type; static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) { int i; char str[8]; for (i = 0; i < gdev->count; i++) { sprintf(str, "cdev%d", i); sysfs_remove_link(&gdev->dev.kobj, str); sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); } } /* * Remove references from ccw devices to ccw group device and from * ccw group device to ccw devices. 
*/ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev) { struct ccw_device *cdev; int i; for (i = 0; i < gdev->count; i++) { cdev = gdev->cdev[i]; if (!cdev) continue; spin_lock_irq(cdev->ccwlock); dev_set_drvdata(&cdev->dev, NULL); spin_unlock_irq(cdev->ccwlock); gdev->cdev[i] = NULL; put_device(&cdev->dev); } } static int ccwgroup_set_online(struct ccwgroup_device *gdev) { struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); int ret = 0; if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) return -EAGAIN; if (gdev->state == CCWGROUP_ONLINE) goto out; if (gdrv->set_online) ret = gdrv->set_online(gdev); if (ret) goto out; gdev->state = CCWGROUP_ONLINE; out: atomic_set(&gdev->onoff, 0); return ret; } static int ccwgroup_set_offline(struct ccwgroup_device *gdev) { struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); int ret = 0; if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) return -EAGAIN; if (gdev->state == CCWGROUP_OFFLINE) goto out; if (gdrv->set_offline) ret = gdrv->set_offline(gdev); if (ret) goto out; gdev->state = CCWGROUP_OFFLINE; out: atomic_set(&gdev->onoff, 0); return ret; } static ssize_t ccwgroup_online_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); unsigned long value; int ret; if (!dev->driver) return -EINVAL; if (!try_module_get(gdrv->driver.owner)) return -EINVAL; ret = strict_strtoul(buf, 0, &value); if (ret) goto out; if (value == 1) ret = ccwgroup_set_online(gdev); else if (value == 0) ret = ccwgroup_set_offline(gdev); else ret = -EINVAL; out: module_put(gdrv->driver.owner); return (ret == 0) ? count : ret; } static ssize_t ccwgroup_online_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); int online; online = (gdev->state == CCWGROUP_ONLINE) ? 
1 : 0; return scnprintf(buf, PAGE_SIZE, "%d\n", online); } /* * Provide an 'ungroup' attribute so the user can remove group devices no * longer needed or accidentially created. Saves memory :) */ static void ccwgroup_ungroup_callback(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); mutex_lock(&gdev->reg_mutex); if (device_is_registered(&gdev->dev)) { __ccwgroup_remove_symlinks(gdev); device_unregister(dev); __ccwgroup_remove_cdev_refs(gdev); } mutex_unlock(&gdev->reg_mutex); } static ssize_t ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); int rc; /* Prevent concurrent online/offline processing and ungrouping. */ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) return -EAGAIN; if (gdev->state != CCWGROUP_OFFLINE) { rc = -EINVAL; goto out; } /* Note that we cannot unregister the device from one of its * attribute methods, so we have to use this roundabout approach. */ rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); out: if (rc) { if (rc != -EAGAIN) /* Release onoff "lock" when ungrouping failed. 
*/ atomic_set(&gdev->onoff, 0); return rc; } return count; } static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store); static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); static struct attribute *ccwgroup_attrs[] = { &dev_attr_online.attr, &dev_attr_ungroup.attr, NULL, }; static struct attribute_group ccwgroup_attr_group = { .attrs = ccwgroup_attrs, }; static const struct attribute_group *ccwgroup_attr_groups[] = { &ccwgroup_attr_group, NULL, }; static void ccwgroup_release(struct device *dev) { kfree(to_ccwgroupdev(dev)); } static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) { char str[8]; int i, rc; for (i = 0; i < gdev->count; i++) { rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj, "group_device"); if (rc) { for (--i; i >= 0; i--) sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); return rc; } } for (i = 0; i < gdev->count; i++) { sprintf(str, "cdev%d", i); rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj, str); if (rc) { for (--i; i >= 0; i--) { sprintf(str, "cdev%d", i); sysfs_remove_link(&gdev->dev.kobj, str); } for (i = 0; i < gdev->count; i++) sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); return rc; } } return 0; } static int __get_next_bus_id(const char **buf, char *bus_id) { int rc, len; char *start, *end; start = (char *)*buf; end = strchr(start, ','); if (!end) { /* Last entry. Strip trailing newline, if applicable. 
*/ end = strchr(start, '\n'); if (end) *end = '\0'; len = strlen(start) + 1; } else { len = end - start + 1; end++; } if (len < CCW_BUS_ID_SIZE) { strlcpy(bus_id, start, len); rc = 0; } else rc = -EINVAL; *buf = end; return rc; } static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE]) { int cssid, ssid, devno; /* Must be of form %x.%x.%04x */ if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3) return 0; return 1; } /** * ccwgroup_create_from_string() - create and register a ccw group device * @root: parent device for the new device * @creator_id: identifier of creating driver * @cdrv: ccw driver of slave devices * @num_devices: number of slave devices * @buf: buffer containing comma separated bus ids of slave devices * * Create and register a new ccw group device as a child of @root. Slave * devices are obtained from the list of bus ids given in @buf and must all * belong to @cdrv. * Returns: * %0 on success and an error code on failure. * Context: * non-atomic */ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, struct ccw_driver *cdrv, int num_devices, const char *buf) { struct ccwgroup_device *gdev; int rc, i; char tmp_bus_id[CCW_BUS_ID_SIZE]; const char *curr_buf; gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), GFP_KERNEL); if (!gdev) return -ENOMEM; atomic_set(&gdev->onoff, 0); mutex_init(&gdev->reg_mutex); mutex_lock(&gdev->reg_mutex); gdev->creator_id = creator_id; gdev->count = num_devices; gdev->dev.bus = &ccwgroup_bus_type; gdev->dev.parent = root; gdev->dev.release = ccwgroup_release; device_initialize(&gdev->dev); curr_buf = buf; for (i = 0; i < num_devices && curr_buf; i++) { rc = __get_next_bus_id(&curr_buf, tmp_bus_id); if (rc != 0) goto error; if (!__is_valid_bus_id(tmp_bus_id)) { rc = -EINVAL; goto error; } gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id); /* * All devices have to be of the same type in * order to be grouped. 
*/ if (!gdev->cdev[i] || gdev->cdev[i]->id.driver_info != gdev->cdev[0]->id.driver_info) { rc = -EINVAL; goto error; } /* Don't allow a device to belong to more than one group. */ spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev)) { spin_unlock_irq(gdev->cdev[i]->ccwlock); rc = -EINVAL; goto error; } dev_set_drvdata(&gdev->cdev[i]->dev, gdev); spin_unlock_irq(gdev->cdev[i]->ccwlock); } /* Check for sufficient number of bus ids. */ if (i < num_devices && !curr_buf) { rc = -EINVAL; goto error; } /* Check for trailing stuff. */ if (i == num_devices && strlen(curr_buf) > 0) { rc = -EINVAL; goto error; } dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); gdev->dev.groups = ccwgroup_attr_groups; rc = device_add(&gdev->dev); if (rc) goto error; rc = __ccwgroup_create_symlinks(gdev); if (rc) { device_del(&gdev->dev); goto error; } mutex_unlock(&gdev->reg_mutex); return 0; error: for (i = 0; i < num_devices; i++) if (gdev->cdev[i]) { spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) dev_set_drvdata(&gdev->cdev[i]->dev, NULL); spin_unlock_irq(gdev->cdev[i]->ccwlock); put_device(&gdev->cdev[i]->dev); gdev->cdev[i] = NULL; } mutex_unlock(&gdev->reg_mutex); put_device(&gdev->dev); return rc; } EXPORT_SYMBOL(ccwgroup_create_from_string); static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; if (action == BUS_NOTIFY_UNBIND_DRIVER) device_schedule_callback(dev, ccwgroup_ungroup_callback); return NOTIFY_OK; } static struct notifier_block ccwgroup_nb = { .notifier_call = ccwgroup_notifier }; static int __init init_ccwgroup(void) { int ret; ret = bus_register(&ccwgroup_bus_type); if (ret) return ret; ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb); if (ret) bus_unregister(&ccwgroup_bus_type); return ret; } static void __exit cleanup_ccwgroup(void) { bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb); 
bus_unregister(&ccwgroup_bus_type); } module_init(init_ccwgroup); module_exit(cleanup_ccwgroup); /************************** driver stuff ******************************/ static int ccwgroup_probe(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); return gdrv->probe ? gdrv->probe(gdev) : -ENODEV; } static int ccwgroup_remove(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); if (!dev->driver) return 0; if (gdrv->remove) gdrv->remove(gdev); return 0; } static void ccwgroup_shutdown(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); if (!dev->driver) return; if (gdrv->shutdown) gdrv->shutdown(gdev); } static int ccwgroup_pm_prepare(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); /* Fail while device is being set online/offline. */ if (atomic_read(&gdev->onoff)) return -EAGAIN; if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) return 0; return gdrv->prepare ? gdrv->prepare(gdev) : 0; } static void ccwgroup_pm_complete(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) return; if (gdrv->complete) gdrv->complete(gdev); } static int ccwgroup_pm_freeze(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) return 0; return gdrv->freeze ? 
gdrv->freeze(gdev) : 0; } static int ccwgroup_pm_thaw(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) return 0; return gdrv->thaw ? gdrv->thaw(gdev) : 0; } static int ccwgroup_pm_restore(struct device *dev) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) return 0; return gdrv->restore ? gdrv->restore(gdev) : 0; } static const struct dev_pm_ops ccwgroup_pm_ops = { .prepare = ccwgroup_pm_prepare, .complete = ccwgroup_pm_complete, .freeze = ccwgroup_pm_freeze, .thaw = ccwgroup_pm_thaw, .restore = ccwgroup_pm_restore, }; static struct bus_type ccwgroup_bus_type = { .name = "ccwgroup", .match = ccwgroup_bus_match, .probe = ccwgroup_probe, .remove = ccwgroup_remove, .shutdown = ccwgroup_shutdown, .pm = &ccwgroup_pm_ops, }; /** * ccwgroup_driver_register() - register a ccw group driver * @cdriver: driver to be registered * * This function is mainly a wrapper around driver_register(). */ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver) { /* register our new driver with the core */ cdriver->driver.bus = &ccwgroup_bus_type; return driver_register(&cdriver->driver); } EXPORT_SYMBOL(ccwgroup_driver_register); static int __ccwgroup_match_all(struct device *dev, void *data) { return 1; } /** * ccwgroup_driver_unregister() - deregister a ccw group driver * @cdriver: driver to be deregistered * * This function is mainly a wrapper around driver_unregister(). */ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver) { struct device *dev; /* We don't want ccwgroup devices to live longer than their driver. 
*/ while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, __ccwgroup_match_all))) { struct ccwgroup_device *gdev = to_ccwgroupdev(dev); mutex_lock(&gdev->reg_mutex); __ccwgroup_remove_symlinks(gdev); device_unregister(dev); __ccwgroup_remove_cdev_refs(gdev); mutex_unlock(&gdev->reg_mutex); put_device(dev); } driver_unregister(&cdriver->driver); } EXPORT_SYMBOL(ccwgroup_driver_unregister); /** * ccwgroup_probe_ccwdev() - probe function for slave devices * @cdev: ccw device to be probed * * This is a dummy probe function for ccw devices that are slave devices in * a ccw group device. * Returns: * always %0 */ int ccwgroup_probe_ccwdev(struct ccw_device *cdev) { return 0; } EXPORT_SYMBOL(ccwgroup_probe_ccwdev); /** * ccwgroup_remove_ccwdev() - remove function for slave devices * @cdev: ccw device to be removed * * This is a remove function for ccw devices that are slave devices in a ccw * group device. It sets the ccw device offline and also deregisters the * embedding ccw group device. */ void ccwgroup_remove_ccwdev(struct ccw_device *cdev) { struct ccwgroup_device *gdev; /* Ignore offlining errors, device is gone anyway. */ ccw_device_set_offline(cdev); /* If one of its devices is gone, the whole group is done for. */ spin_lock_irq(cdev->ccwlock); gdev = dev_get_drvdata(&cdev->dev); if (!gdev) { spin_unlock_irq(cdev->ccwlock); return; } /* Get ccwgroup device reference for local processing. */ get_device(&gdev->dev); spin_unlock_irq(cdev->ccwlock); /* Unregister group device. */ mutex_lock(&gdev->reg_mutex); if (device_is_registered(&gdev->dev)) { __ccwgroup_remove_symlinks(gdev); device_unregister(&gdev->dev); __ccwgroup_remove_cdev_refs(gdev); } mutex_unlock(&gdev->reg_mutex); /* Release ccwgroup device reference for local processing. */ put_device(&gdev->dev); } EXPORT_SYMBOL(ccwgroup_remove_ccwdev); MODULE_LICENSE("GPL");
gpl-2.0
sxwzhw/iproj-su640
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
4808
5912
/******************************************************************************* This contains the functions to handle the pci driver. Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include <linux/pci.h> #include "stmmac.h" struct plat_stmmacenet_data plat_dat; struct stmmac_mdio_bus_data mdio_data; static void stmmac_default_data(void) { memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); plat_dat.bus_id = 1; plat_dat.phy_addr = 0; plat_dat.interface = PHY_INTERFACE_MODE_GMII; plat_dat.pbl = 32; plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat_dat.has_gmac = 1; plat_dat.force_sf_dma_mode = 1; mdio_data.bus_id = 1; mdio_data.phy_reset = NULL; mdio_data.phy_mask = 0; plat_dat.mdio_bus_data = &mdio_data; } /** * stmmac_pci_probe * * @pdev: pci device pointer * @id: pointer to table of device id/id's. * * Description: This probing function gets called for all PCI devices which * match the ID table and are not "owned" by other driver yet. 
This function
 * gets passed a "struct pci_dev *" for each device whose entry in the ID table
 * matches the device. The probe functions returns zero when the driver choose
 * to take "ownership" of the device or an error code(-ve no) otherwise.
 */
static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
				      const struct pci_device_id *id)
{
	int ret = 0;
	void __iomem *addr = NULL;
	struct stmmac_priv *priv = NULL;
	int i;

	/* Enable pci device */
	ret = pci_enable_device(pdev);
	if (ret) {
		pr_err("%s : ERROR: failed to enable %s device\n", __func__,
		       pci_name(pdev));
		return ret;
	}
	if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
		pr_err("%s: ERROR: failed to get PCI region\n", __func__);
		ret = -ENODEV;
		goto err_out_req_reg_failed;
	}

	/* Get the base address of device: first non-empty BAR is used */
	for (i = 0; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		addr = pci_iomap(pdev, i, 0);
		if (addr == NULL) {
			pr_err("%s: ERROR: cannot map register memory, aborting",
			       __func__);
			ret = -EIO;
			goto err_out_map_failed;
		}
		break;
	}
	pci_set_master(pdev);

	stmmac_default_data();

	priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
	if (!priv) {
		pr_err("%s: main driver probe failed", __func__);
		/*
		 * BUGFIX: set an error code before taking the error path.
		 * Previously ret was still 0 here, so the function returned
		 * success while releasing all the resources it had claimed.
		 */
		ret = -ENODEV;
		goto err_out;
	}
	priv->dev->irq = pdev->irq;
	priv->wol_irq = pdev->irq;

	pci_set_drvdata(pdev, priv->dev);

	pr_debug("STMMAC platform driver registration completed");

	return 0;

err_out:
	pci_clear_master(pdev);
err_out_map_failed:
	pci_release_regions(pdev);
err_out_req_reg_failed:
	pci_disable_device(pdev);

	return ret;
}

/**
 * stmmac_dvr_remove
 *
 * @pdev: platform device pointer
 * Description: this function calls the main to free the net resources
 * and releases the PCI resources.
*/ static void __devexit stmmac_pci_remove(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); stmmac_dvr_remove(ndev); pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, priv->ioaddr); pci_release_regions(pdev); pci_disable_device(pdev); } #ifdef CONFIG_PM static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *ndev = pci_get_drvdata(pdev); int ret; ret = stmmac_suspend(ndev); pci_save_state(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return ret; } static int stmmac_pci_resume(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); return stmmac_resume(ndev); } #endif #define STMMAC_VENDOR_ID 0x700 #define STMMAC_DEVICE_ID 0x1108 static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = { {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)}, {} }; MODULE_DEVICE_TABLE(pci, stmmac_id_table); static struct pci_driver stmmac_driver = { .name = STMMAC_RESOURCE_NAME, .id_table = stmmac_id_table, .probe = stmmac_pci_probe, .remove = __devexit_p(stmmac_pci_remove), #ifdef CONFIG_PM .suspend = stmmac_pci_suspend, .resume = stmmac_pci_resume, #endif }; /** * stmmac_init_module - Entry point for the driver * Description: This function is the entry point for the driver. */ static int __init stmmac_init_module(void) { int ret; ret = pci_register_driver(&stmmac_driver); if (ret < 0) pr_err("%s: ERROR: driver registration failed\n", __func__); return ret; } /** * stmmac_cleanup_module - Cleanup routine for the driver * Description: This function is the cleanup routine for the driver. 
*/ static void __exit stmmac_cleanup_module(void) { pci_unregister_driver(&stmmac_driver); } module_init(stmmac_init_module); module_exit(stmmac_cleanup_module); MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL");
gpl-2.0
lozohcum/kernel
sound/soc/codecs/ak4104.c
4808
6152
/* * AK4104 ALSA SoC (ASoC) driver * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/soc.h> #include <sound/initval.h> #include <linux/spi/spi.h> #include <sound/asoundef.h> /* AK4104 registers addresses */ #define AK4104_REG_CONTROL1 0x00 #define AK4104_REG_RESERVED 0x01 #define AK4104_REG_CONTROL2 0x02 #define AK4104_REG_TX 0x03 #define AK4104_REG_CHN_STATUS(x) ((x) + 0x04) #define AK4104_NUM_REGS 10 #define AK4104_REG_MASK 0x1f #define AK4104_READ 0xc0 #define AK4104_WRITE 0xe0 #define AK4104_RESERVED_VAL 0x5b /* Bit masks for AK4104 registers */ #define AK4104_CONTROL1_RSTN (1 << 0) #define AK4104_CONTROL1_PW (1 << 1) #define AK4104_CONTROL1_DIF0 (1 << 2) #define AK4104_CONTROL1_DIF1 (1 << 3) #define AK4104_CONTROL2_SEL0 (1 << 0) #define AK4104_CONTROL2_SEL1 (1 << 1) #define AK4104_CONTROL2_MODE (1 << 2) #define AK4104_TX_TXE (1 << 0) #define AK4104_TX_V (1 << 1) #define DRV_NAME "ak4104-codec" struct ak4104_private { struct regmap *regmap; }; static int ak4104_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int format) { struct snd_soc_codec *codec = codec_dai->codec; int val = 0; int ret; /* set DAI format */ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: val |= AK4104_CONTROL1_DIF0; break; case SND_SOC_DAIFMT_I2S: val |= AK4104_CONTROL1_DIF0 | AK4104_CONTROL1_DIF1; break; default: dev_err(codec->dev, "invalid dai format\n"); return -EINVAL; } /* This device can only be slave */ if ((format & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) return -EINVAL; ret = snd_soc_update_bits(codec, AK4104_REG_CONTROL1, AK4104_CONTROL1_DIF0 | 
AK4104_CONTROL1_DIF1, val); if (ret < 0) return ret; return 0; } static int ak4104_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; int val = 0; /* set the IEC958 bits: consumer mode, no copyright bit */ val |= IEC958_AES0_CON_NOT_COPYRIGHT; snd_soc_write(codec, AK4104_REG_CHN_STATUS(0), val); val = 0; switch (params_rate(params)) { case 44100: val |= IEC958_AES3_CON_FS_44100; break; case 48000: val |= IEC958_AES3_CON_FS_48000; break; case 32000: val |= IEC958_AES3_CON_FS_32000; break; default: dev_err(codec->dev, "unsupported sampling rate\n"); return -EINVAL; } return snd_soc_write(codec, AK4104_REG_CHN_STATUS(3), val); } static const struct snd_soc_dai_ops ak4101_dai_ops = { .hw_params = ak4104_hw_params, .set_fmt = ak4104_set_dai_fmt, }; static struct snd_soc_dai_driver ak4104_dai = { .name = "ak4104-hifi", .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_192000, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE }, .ops = &ak4101_dai_ops, }; static int ak4104_probe(struct snd_soc_codec *codec) { struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec); int ret; codec->control_data = ak4104->regmap; ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP); if (ret != 0) return ret; /* set power-up and non-reset bits */ ret = snd_soc_update_bits(codec, AK4104_REG_CONTROL1, AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN, AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN); if (ret < 0) return ret; /* enable transmitter */ ret = snd_soc_update_bits(codec, AK4104_REG_TX, AK4104_TX_TXE, AK4104_TX_TXE); if (ret < 0) return ret; return 0; } static int ak4104_remove(struct snd_soc_codec *codec) { snd_soc_update_bits(codec, AK4104_REG_CONTROL1, AK4104_CONTROL1_PW | AK4104_CONTROL1_RSTN, 0); return 0; } static struct 
snd_soc_codec_driver soc_codec_device_ak4104 = { .probe = ak4104_probe, .remove = ak4104_remove, }; static const struct regmap_config ak4104_regmap = { .reg_bits = 8, .val_bits = 8, .max_register = AK4104_NUM_REGS - 1, .read_flag_mask = AK4104_READ, .write_flag_mask = AK4104_WRITE, .cache_type = REGCACHE_RBTREE, }; static int ak4104_spi_probe(struct spi_device *spi) { struct ak4104_private *ak4104; unsigned int val; int ret; spi->bits_per_word = 8; spi->mode = SPI_MODE_0; ret = spi_setup(spi); if (ret < 0) return ret; ak4104 = devm_kzalloc(&spi->dev, sizeof(struct ak4104_private), GFP_KERNEL); if (ak4104 == NULL) return -ENOMEM; ak4104->regmap = regmap_init_spi(spi, &ak4104_regmap); if (IS_ERR(ak4104->regmap)) { ret = PTR_ERR(ak4104->regmap); return ret; } /* read the 'reserved' register - according to the datasheet, it * should contain 0x5b. Not a good way to verify the presence of * the device, but there is no hardware ID register. */ ret = regmap_read(ak4104->regmap, AK4104_REG_RESERVED, &val); if (ret != 0) goto err; if (val != AK4104_RESERVED_VAL) { ret = -ENODEV; goto err; } spi_set_drvdata(spi, ak4104); ret = snd_soc_register_codec(&spi->dev, &soc_codec_device_ak4104, &ak4104_dai, 1); if (ret != 0) goto err; return 0; err: regmap_exit(ak4104->regmap); return ret; } static int __devexit ak4104_spi_remove(struct spi_device *spi) { struct ak4104_private *ak4101 = spi_get_drvdata(spi); regmap_exit(ak4101->regmap); snd_soc_unregister_codec(&spi->dev); return 0; } static struct spi_driver ak4104_spi_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ak4104_spi_probe, .remove = __devexit_p(ak4104_spi_remove), }; module_spi_driver(ak4104_spi_driver); MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("Asahi Kasei AK4104 ALSA SoC driver"); MODULE_LICENSE("GPL");
gpl-2.0
baran0119/kernel_samsung_baffinlitexx
drivers/media/video/saa7134/saa7134-cards.c
4808
200709
/* * * device driver for philips saa7134 based TV cards * card-specific stuff. * * (c) 2001-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include "saa7134-reg.h" #include "saa7134.h" #include "tuner-xc2028.h" #include <media/v4l2-common.h> #include <media/tveeprom.h> #include "tea5767.h" #include "tda18271.h" #include "xc5000.h" #include "s5h1411.h" /* commly used strings */ static char name_mute[] = "mute"; static char name_radio[] = "Radio"; static char name_tv[] = "Television"; static char name_tv_mono[] = "TV (mono only)"; static char name_comp[] = "Composite"; static char name_comp1[] = "Composite1"; static char name_comp2[] = "Composite2"; static char name_comp3[] = "Composite3"; static char name_comp4[] = "Composite4"; static char name_svideo[] = "S-Video"; /* ------------------------------------------------------------------ */ /* board config info */ /* If radio_type !=UNSET, radio_addr should be specified */ struct saa7134_board saa7134_boards[] = { [SAA7134_BOARD_UNKNOWN] = { .name = "UNKNOWN/GENERIC", .audio_clock = 0x00187de7, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = "default", .vmux = 0, 
.amux = LINE1, }}, }, [SAA7134_BOARD_PROTEUS_PRO] = { /* /me */ .name = "Proteus Pro [philips reference design]", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_FLYVIDEO3000] = { /* "Marco d'Itri" <md@Linux.IT> */ .name = "LifeView FlyVIDEO3000", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0xe000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x8000, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .gpio = 0x0000, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x2000, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x8000, }, }, [SAA7134_BOARD_FLYVIDEO2000] = { /* "TC Wan" <tcwan@cs.usm.my> */ .name = "LifeView/Typhoon FlyVIDEO2000", .audio_clock = 0x00200000, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0xe000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .gpio = 0x0000, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x2000, }, .mute = { .name = name_mute, .amux = LINE2, .gpio = 0x8000, }, }, [SAA7134_BOARD_FLYTVPLATINUM_MINI] = { /* "Arnaud Quette" <aquette@free.fr> */ .name = "LifeView FlyTV 
Platinum Mini", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, /* Composite signal on S-Video input */ .vmux = 0, .amux = LINE2, },{ .name = name_comp2, /* Composite input */ .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_FLYTVPLATINUM_FM] = { /* LifeView FlyTV Platinum FM (LR214WF) */ /* "Peter Missel <peter.missel@onlinehome.de> */ .name = "LifeView FlyTV Platinum FM / Gold", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x1E000, /* Set GP16 and unused 15,14,13 to Output */ .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x10000, /* GP16=1 selects TV input */ .tv = 1, },{ /* .name = name_tv_mono, .vmux = 1, .amux = LINE2, .gpio = 0x0000, .tv = 1, },{ */ .name = name_comp1, /* Composite signal on S-Video input */ .vmux = 0, .amux = LINE2, /* .gpio = 0x4000, */ },{ .name = name_comp2, /* Composite input */ .vmux = 3, .amux = LINE2, /* .gpio = 0x4000, */ },{ .name = name_svideo, /* S-Video signal on S-Video input */ .vmux = 8, .amux = LINE2, /* .gpio = 0x4000, */ }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x00000, /* GP16=0 selects FM radio antenna */ }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x10000, }, }, [SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM] = { /* RoverMedia TV Link Pro FM (LR138 REV:I) */ /* Eugene Yudin <Eugene.Yudin@gmail.com> */ .name = "RoverMedia TV Link Pro FM", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MFPE05 2 */ .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0xe000, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x8000, .tv = 1, }, { .name = name_tv_mono, .vmux = 1, .amux = 
LINE2, .gpio = 0x0000, .tv = 1, }, { .name = name_comp1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, }, { .name = name_comp2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x4000, } }, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x2000, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x8000, }, }, [SAA7134_BOARD_EMPRESS] = { /* "Gert Vervoort" <gert.vervoort@philips.com> */ .name = "EMPRESS", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .inputs = {{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, }}, .radio = { .name = name_radio, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, }, [SAA7134_BOARD_MONSTERTV] = { /* "K.Ohta" <alpha292@bremen.or.jp> */ .name = "SKNet Monster TV", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC_M, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_MD9717] = { .name = "Tevion MD 9717", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ /* workaround for problems with normal TV sound */ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_comp2, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, .mute = { .name = name_mute, .amux = TV, }, }, [SAA7134_BOARD_TVSTATION_RDS] = { /* 
Typhoon TV Tuner RDS: Art.Nr. 50694 */ .name = "KNC One TV-Station RDS / Typhoon TV Tuner RDS", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = "CVid over SVid", .vmux = 0, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_TVSTATION_DVR] = { .name = "KNC One TV-Station DVR", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x820000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x20000, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x20000, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x20000, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x20000, }, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, }, [SAA7134_BOARD_CINERGY400] = { .name = "Terratec Cinergy 400 TV", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp2, /* CVideo over SVideo Connector */ .vmux = 0, .amux = LINE1, }} }, [SAA7134_BOARD_MD5044] = { .name = "Medion 5044", .audio_clock = 0x00187de7, /* was: 0x00200000, */ .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ 
/* workaround for problems with normal TV sound */ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_KWORLD] = { .name = "Kworld/KuroutoShikou SAA7130-TVPCI", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC_M, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_CINERGY600] = { .name = "Terratec Cinergy 600 TV", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp2, /* CVideo over SVideo Connector */ .vmux = 0, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_MD7134] = { .name = "Medion 7134", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, .mute = { .name = name_mute, .amux = TV, }, }, [SAA7134_BOARD_TYPHOON_90031] = { /* aka Typhoon "TV+Radio", Art.Nr 90031 */ /* Tom Zoerner <tomzo at users sourceforge net> */ .name = "Typhoon TV+Radio 90031", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, 
.tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_ELSA] = { .name = "ELSA EX-VISION 300TV", .audio_clock = 0x00187de7, .tuner_type = TUNER_HITACHI_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_tv, .vmux = 4, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_ELSA_500TV] = { .name = "ELSA EX-VISION 500TV", .audio_clock = 0x00187de7, .tuner_type = TUNER_HITACHI_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 7, .amux = LINE1, },{ .name = name_tv, .vmux = 8, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 8, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_ELSA_700TV] = { .name = "ELSA EX-VISION 700TV", .audio_clock = 0x00187de7, .tuner_type = TUNER_HITACHI_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 4, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 6, .amux = LINE1, },{ .name = name_svideo, .vmux = 7, .amux = LINE1, }}, .mute = { .name = name_mute, .amux = TV, }, }, [SAA7134_BOARD_ASUSTeK_TVFM7134] = { .name = "ASUS TV-FM 7134", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = LINE2, },{ .name = name_svideo, .vmux = 6, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = LINE1, }, }, [SAA7134_BOARD_ASUSTeK_TVFM7135] = { .name = "ASUS TV-FM 7135", .audio_clock = 
0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x0000, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = LINE2, .gpio = 0x0000, },{ .name = name_svideo, .vmux = 6, .amux = LINE2, .gpio = 0x0000, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x200000, }, .mute = { .name = name_mute, .gpio = 0x0000, }, }, [SAA7134_BOARD_VA1000POWER] = { .name = "AOPEN VA1000 POWER", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_10MOONSTVMASTER] = { /* "lilicheng" <llc@linuxfans.org> */ .name = "10MOONS PCI TV CAPTURE CARD", .audio_clock = 0x00200000, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0xe000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .gpio = 0x0000, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x2000, }, .mute = { .name = name_mute, .amux = LINE2, .gpio = 0x8000, }, }, [SAA7134_BOARD_BMK_MPEX_NOTUNER] = { /* "Andrew de Quincey" <adq@lidskialf.net> */ .name = "BMK MPEX No Tuner", .audio_clock = 0x200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .inputs = {{ .name = name_comp1, .vmux = 4, .amux = LINE1, },{ .name = name_comp2, .vmux = 3, .amux = LINE1, },{ .name = name_comp3, .vmux = 0, .amux = LINE1, },{ .name = name_comp4, .vmux = 1, .amux = LINE1, 
},{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, }, [SAA7134_BOARD_VIDEOMATE_TV] = { .name = "Compro VideoMate TV", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC_M, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUS] = { .name = "Compro VideoMate TV Gold+", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC_M, .gpiomask = 0x800c0000, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x06c00012, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x0ac20012, },{ .name = name_tv, .vmux = 1, .amux = LINE2, .gpio = 0x08c20012, .tv = 1, }}, /* radio and probably mute is missing */ }, [SAA7134_BOARD_CRONOS_PLUS] = { /* gpio pins: 0 .. 3 BASE_ID 4 .. 7 PROTECT_ID 8 .. 11 USER_OUT 12 .. 13 USER_IN 14 .. 
15 VIDIN_SEL */ .name = "Matrox CronosPlus", .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0xcf00, .inputs = {{ .name = name_comp1, .vmux = 0, .gpio = 2 << 14, },{ .name = name_comp2, .vmux = 0, .gpio = 1 << 14, },{ .name = name_comp3, .vmux = 0, .gpio = 0 << 14, },{ .name = name_comp4, .vmux = 0, .gpio = 3 << 14, },{ .name = name_svideo, .vmux = 8, .gpio = 2 << 14, }}, }, [SAA7134_BOARD_MD2819] = { .name = "AverMedia M156 / Medion 2819", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x00, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x02, }, { .name = name_comp2, .vmux = 0, .amux = LINE1, .gpio = 0x02, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x02, } }, .radio = { .name = name_radio, .amux = LINE1, .gpio = 0x01, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x00, }, }, [SAA7134_BOARD_BMK_MPEX_TUNER] = { /* "Greg Wickham <greg.wickham@grangenet.net> */ .name = "BMK MPEX Tuner", .audio_clock = 0x200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .inputs = {{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }}, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, }, [SAA7134_BOARD_ASUSTEK_TVFM7133] = { .name = "ASUS TV-FM 7133", .audio_clock = 0x00187de7, /* probably wrong, the 7133 one is the NTSC version ... 
* .tuner_type = TUNER_PHILIPS_FM1236_MK3 */ .tuner_type = TUNER_LG_NTSC_NEW_TAPC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = LINE2, },{ .name = name_svideo, .vmux = 6, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = LINE1, }, }, [SAA7134_BOARD_PINNACLE_PCTV_STEREO] = { .name = "Pinnacle PCTV Stereo (saa7134)", .audio_clock = 0x00187de7, .tuner_type = TUNER_MT2032, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_comp2, .vmux = 1, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_MANLI_MTV002] = { /* Ognjen Nastic <ognjen@logosoft.ba> */ .name = "Manli MuchTV M-TV002", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_MANLI_MTV001] = { /* Ognjen Nastic <ognjen@logosoft.ba> UNTESTED */ .name = "Manli MuchTV M-TV001", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }}, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_TG3000TV] = { /* TransGear 3000TV */ .name = "Nagase Sangyo TransGear 3000TV", .audio_clock = 0x00187de7, .tuner_type = 
TUNER_PHILIPS_NTSC_M, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_ECS_TVP3XP] = { .name = "Elitegroup ECS TVP3XP FM1216 Tuner Card(PAL-BG,FM) ", .audio_clock = 0x187de7, /* xtal 32.1 MHz */ .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = "CVid over SVid", .vmux = 0, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_ECS_TVP3XP_4CB5] = { .name = "Elitegroup ECS TVP3XP FM1236 Tuner Card (NTSC,FM)", .audio_clock = 0x187de7, .tuner_type = TUNER_PHILIPS_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = "CVid over SVid", .vmux = 0, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_ECS_TVP3XP_4CB6] = { /* Barry Scott <barry.scott@onelan.co.uk> */ .name = "Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM)", .audio_clock = 0x187de7, .tuner_type = TUNER_PHILIPS_PAL_I, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = "CVid over SVid", .vmux = 0, .amux = LINE1, }}, .radio = { .name = 
name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_AVACSSMARTTV] = { /* Roman Pszonczenko <romka@kolos.math.uni.lodz.pl> */ .name = "AVACS SmartTV", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x200000, }, }, [SAA7134_BOARD_AVERMEDIA_DVD_EZMAKER] = { /* Michael Smith <msmith@cbnco.com> */ .name = "AVerMedia DVD EZMaker", .audio_clock = 0x00187de7, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_comp1, .vmux = 3, },{ .name = name_svideo, .vmux = 8, }}, }, [SAA7134_BOARD_AVERMEDIA_M103] = { /* Massimo Piccioni <dafastidio@libero.it> */ .name = "AVerMedia MiniPCI DVB-T Hybrid M103", .audio_clock = 0x187de7, .tuner_type = TUNER_XC2028, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, } }, }, [SAA7134_BOARD_NOVAC_PRIMETV7133] = { /* toshii@netbsd.org */ .name = "Noval Prime TV 7133", .audio_clock = 0x00200000, .tuner_type = TUNER_ALPS_TSBH1_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_comp1, .vmux = 3, },{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_svideo, .vmux = 8, }}, }, [SAA7134_BOARD_AVERMEDIA_STUDIO_305] = { .name = "AverMedia AverTV Studio 305", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1256_IH3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, },{ 
.name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = LINE2, }, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_AVERMEDIA_STUDIO_505] = { /* Vasiliy Temnikov <vaka@newmail.ru> */ .name = "AverMedia AverTV Studio 505", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = { { .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, }, { .name = name_comp1, .vmux = 0, .amux = LINE2, }, { .name = name_comp2, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = LINE2, }, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_UPMOST_PURPLE_TV] = { .name = "UPMOST PURPLE TV", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1236_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 7, .amux = TV, .tv = 1, },{ .name = name_svideo, .vmux = 7, .amux = LINE1, }}, }, [SAA7134_BOARD_ITEMS_MTV005] = { /* Norman Jonas <normanjonas@arcor.de> */ .name = "Items MuchTV Plus / IT-005", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_CINERGY200] = { .name = "Terratec Cinergy 200 TV", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = 
LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp2, /* CVideo over SVideo Connector */ .vmux = 0, .amux = LINE1, }}, .mute = { .name = name_mute, .amux = LINE2, }, }, [SAA7134_BOARD_VIDEOMATE_TV_PVR] = { /* Alain St-Denis <alain@topaze.homeip.net> */ .name = "Compro VideoMate TV PVR/FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC_M, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x808c0080, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x00080, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x00080, },{ .name = name_tv, .vmux = 1, .amux = LINE2_LEFT, .tv = 1, .gpio = 0x00080, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x80000, }, .mute = { .name = name_mute, .amux = LINE2, .gpio = 0x40000, }, }, [SAA7134_BOARD_SABRENT_SBTTVFM] = { /* Michael Rodriguez-Torrent <mrtorrent@asu.edu> */ .name = "Sabrent SBT-TVFM (saa7130)", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC_M, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_ZOLID_XPERT_TV7134] = { /* Helge Jensen <helge.jensen@slog.dk> */ .name = ":Zolid Xpert TV7134", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_EMPIRE_PCI_TV_RADIO_LE] = { /* "Matteo Az" <matte.az@nospam.libero.it> ;-) */ .name = "Empire PCI TV-Radio LE", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = 
ADDR_UNSET, .gpiomask = 0x4000, .inputs = {{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .gpio = 0x8000, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x8000, },{ .name = name_svideo, .vmux = 6, .amux = LINE1, .gpio = 0x8000, }}, .radio = { .name = name_radio, .amux = LINE1, .gpio = 0x8000, }, .mute = { .name = name_mute, .amux = TV, .gpio =0x8000, } }, [SAA7134_BOARD_AVERMEDIA_STUDIO_307] = { /* Nickolay V. Shmyrev <nshmyrev@yandex.ru> Lots of thanks to Andrey Zolotarev <zolotarev_andrey@mail.ru> */ .name = "Avermedia AVerTV Studio 307", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1256_IH3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x00, },{ .name = name_comp, .vmux = 3, .amux = LINE1, .gpio = 0x02, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x02, }}, .radio = { .name = name_radio, .amux = LINE1, .gpio = 0x01, }, .mute = { .name = name_mute, .amux = LINE1, .gpio = 0x00, }, }, [SAA7134_BOARD_AVERMEDIA_GO_007_FM] = { .name = "Avermedia AVerTV GO 007 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00300003, /* .gpiomask = 0x8c240003, */ .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x01, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, .gpio = 0x02, },{ .name = name_svideo, .vmux = 6, .amux = LINE1, .gpio = 0x02, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x00300001, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x01, }, }, [SAA7134_BOARD_AVERMEDIA_CARDBUS] = { /* Kees.Blom@cwi.nl */ .name = "AVerMedia Cardbus TV/Radio (E500)", .audio_clock = 0x187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv 
= 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE1, }, }, [SAA7134_BOARD_AVERMEDIA_CARDBUS_501] = { /* Oldrich Jedlicka <oldium.pro@seznam.cz> */ .name = "AVerMedia Cardbus TV/Radio (E501R)", .audio_clock = 0x187de7, .tuner_type = TUNER_ALPS_TSBE5_PAL, .radio_type = TUNER_TEA5767, .tuner_addr = 0x61, .radio_addr = 0x60, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x08000000, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x08000000, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x08000000, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x08000000, } }, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x00000000, }, }, [SAA7134_BOARD_CINERGY400_CARDBUS] = { .name = "Terratec Cinergy 400 mobile", .audio_clock = 0x187de7, .tuner_type = TUNER_ALPS_TSBE5_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_CINERGY600_MK3] = { .name = "Terratec Cinergy 600 TV MK3", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp2, /* CVideo over SVideo Connector */ .vmux = 0, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_VIDEOMATE_GOLD_PLUS] = { /* Dylan Walkden <dylan_walkden@hotmail.com> */ .name = "Compro VideoMate Gold+ Pal", .audio_clock = 0x00187de7, .tuner_type = 
TUNER_PHILIPS_PAL, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x1ce780, .inputs = {{ .name = name_svideo, .vmux = 0, /* CVideo over SVideo Connector - ok? */ .amux = LINE1, .gpio = 0x008080, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x008080, },{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x008080, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x80000, }, .mute = { .name = name_mute, .amux = LINE2, .gpio = 0x0c8000, }, }, [SAA7134_BOARD_PINNACLE_300I_DVBT_PAL] = { .name = "Pinnacle PCTV 300i DVB-T + PAL", .audio_clock = 0x00187de7, .tuner_type = TUNER_MT2032, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_INACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_comp2, .vmux = 1, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_PROVIDEO_PV952] = { /* andreas.kretschmer@web.de */ .name = "ProVideo PV952", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_AVERMEDIA_305] = { /* much like the "studio" version but without radio * and another tuner (sirspiritus@yandex.ru) */ .name = "AverMedia AverTV/305", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FQ1216ME, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, 
},{ .name = name_comp2, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_FLYDVBTDUO] = { /* LifeView FlyDVB-T DUO */ /* "Nico Sabbi <nsabbi@tiscali.it> Hartmut Hackmann hartmut.hackmann@t-online.de*/ .name = "LifeView FlyDVB-T DUO / MSI TV@nywhere Duo", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00200000, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ .tv = 1, },{ .name = name_comp1, /* Composite signal on S-Video input */ .vmux = 0, .amux = LINE2, },{ .name = name_comp2, /* Composite input */ .vmux = 3, .amux = LINE2, },{ .name = name_svideo, /* S-Video signal on S-Video input */ .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, }, [SAA7134_BOARD_PHILIPS_TOUGH] = { .name = "Philips TOUGH DVB-T reference design", .tuner_type = TUNER_ABSENT, .audio_clock = 0x00187de7, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_AVERMEDIA_307] = { /* Davydov Vladimir <vladimir@iqmedia.com> */ .name = "Avermedia AVerTV 307", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FQ1216ME, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_comp2, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_ADS_INSTANT_TV] = { .name = "ADS Tech Instant TV (saa7135)", .audio_clock = 0x00187de7, .tuner_type = 
TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_KWORLD_VSTREAM_XPERT] = { .name = "Kworld/Tevion V-Stream Xpert TV PVR7134", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_PAL_I, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x0700, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x000, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x200, /* gpio by DScaler */ },{ .name = name_svideo, .vmux = 0, .amux = LINE1, .gpio = 0x200, }}, .radio = { .name = name_radio, .amux = LINE1, .gpio = 0x100, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x000, }, }, [SAA7134_BOARD_FLYDVBT_DUO_CARDBUS] = { .name = "LifeView/Typhoon/Genius FlyDVB-T Duo Cardbus", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x00200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ .tv = 1, },{ .name = name_svideo, /* S-Video signal on S-Video input */ .vmux = 8, .amux = LINE2, },{ .name = name_comp1, /* Composite signal on S-Video input */ .vmux = 0, .amux = LINE2, },{ .name = name_comp2, /* Composite input */ .vmux = 3, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, }, [SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII] = { .name = "Compro VideoMate TV Gold+II", .audio_clock = 0x002187de7, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .radio_type = TUNER_TEA5767, .tuner_addr = 0x63, .radio_addr = 0x60, .gpiomask = 0x8c1880, .inputs = {{ .name = name_svideo, .vmux = 0, .amux = LINE1, .gpio = 0x800800, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, 
.gpio = 0x801000, },{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x800000, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x880000, }, .mute = { .name = name_mute, .amux = LINE2, .gpio = 0x840000, }, }, [SAA7134_BOARD_KWORLD_XPERT] = { /* FIXME: - Remote control doesn't initialize properly. - Audio volume starts muted, then gradually increases after channel change. - Overlay scaling problems (application error?) - Composite S-Video untested. From: Konrad Rzepecki <hannibal@megapolis.pl> */ .name = "Kworld Xpert TV PVR7134", .audio_clock = 0x00187de7, .tuner_type = TUNER_TENA_9533_DI, .radio_type = TUNER_TEA5767, .tuner_addr = 0x61, .radio_addr = 0x60, .gpiomask = 0x0700, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x000, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x200, /* gpio by DScaler */ },{ .name = name_svideo, .vmux = 0, .amux = LINE1, .gpio = 0x200, }}, .radio = { .name = name_radio, .amux = LINE1, .gpio = 0x100, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x000, }, }, [SAA7134_BOARD_FLYTV_DIGIMATRIX] = { .name = "FlyTV mini Asus Digimatrix", .audio_clock = 0x00200000, .tuner_type = TUNER_LG_TALN, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, /* radio unconfirmed */ .amux = LINE2, }, }, [SAA7134_BOARD_KWORLD_TERMINATOR] = { /* Kworld V-Stream Studio TV Terminator */ /* "James Webb <jrwebb@qwest.net> */ .name = "V-Stream Studio TV Terminator", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 1 << 21, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 
0x0000000, .tv = 1, },{ .name = name_comp1, /* Composite input */ .vmux = 3, .amux = LINE2, .gpio = 0x0000000, },{ .name = name_svideo, /* S-Video input */ .vmux = 8, .amux = LINE2, .gpio = 0x0000000, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_YUAN_TUN900] = { /* FIXME: * S-Video and composite sources untested. * Radio not working. * Remote control not yet implemented. * From : codemaster@webgeeks.be */ .name = "Yuan TUN-900 (saa7135)", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr= ADDR_UNSET, .radio_addr= ADDR_UNSET, .gpiomask = 0x00010003, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x01, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, .gpio = 0x02, },{ .name = name_svideo, .vmux = 6, .amux = LINE2, .gpio = 0x02, }}, .radio = { .name = name_radio, .amux = LINE1, .gpio = 0x00010003, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x01, }, }, [SAA7134_BOARD_BEHOLD_409FM] = { /* <http://tuner.beholder.ru>, Sergey <skiv@orel.ru> */ /* Beholder Intl. Ltd. 
2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 409 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_GOTVIEW_7135] = { /* Mike Baikov <mike@baikov.com> */ /* Andrey Cvetcov <ays14@yandex.ru> */ .name = "GoTView 7135 PCI", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00200003, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x00200003, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .gpio = 0x00200003, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x00200003, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x00200003, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x00200003, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x00200003, }, }, [SAA7134_BOARD_PHILIPS_EUROPA] = { .name = "Philips EUROPA V3 reference design", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TD1316, .radio_type = UNSET, .tuner_addr = 0x61, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_VIDEOMATE_DVBT_300] = { .name = "Compro Videomate DVB-T300", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TD1316, .radio_type = UNSET, .tuner_addr = 0x61, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, 
.mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_VIDEOMATE_DVBT_200] = { .name = "Compro Videomate DVB-T200", .tuner_type = TUNER_ABSENT, .audio_clock = 0x00187de7, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_RTD_VFG7350] = { .name = "RTD Embedded Technologies VFG7350", .audio_clock = 0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .empress_addr = 0x21, .inputs = {{ .name = "Composite 0", .vmux = 0, .amux = LINE1, },{ .name = "Composite 1", .vmux = 1, .amux = LINE2, },{ .name = "Composite 2", .vmux = 2, .amux = LINE1, },{ .name = "Composite 3", .vmux = 3, .amux = LINE2, },{ .name = "S-Video 0", .vmux = 8, .amux = LINE1, },{ .name = "S-Video 1", .vmux = 9, .amux = LINE2, }}, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, .vid_port_opts = ( SET_T_CODE_POLARITY_NON_INVERTED | SET_CLOCK_NOT_DELAYED | SET_CLOCK_INVERTED | SET_VSYNC_OFF ), }, [SAA7134_BOARD_RTD_VFG7330] = { .name = "RTD Embedded Technologies VFG7330", .audio_clock = 0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = "Composite 0", .vmux = 0, .amux = LINE1, },{ .name = "Composite 1", .vmux = 1, .amux = LINE2, },{ .name = "Composite 2", .vmux = 2, .amux = LINE1, },{ .name = "Composite 3", .vmux = 3, .amux = LINE2, },{ .name = "S-Video 0", .vmux = 8, .amux = LINE1, },{ .name = "S-Video 1", .vmux = 9, .amux = LINE2, }}, }, [SAA7134_BOARD_FLYTVPLATINUM_MINI2] = { .name = "LifeView FlyTV Platinum Mini2", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, 
.radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, /* Composite signal on S-Video input */ .vmux = 0, .amux = LINE2, },{ .name = name_comp2, /* Composite input */ .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = { /* Michael Krufky <mkrufky@m1k.net> * Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder * AFAIK, there is no analog demod, thus, * no support for analog television. */ .name = "AVerMedia AVerTVHD MCE A180", .audio_clock = 0x00187de7, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_comp1, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_MONSTERTV_MOBILE] = { .name = "SKNet MonsterTV Mobile", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 6, .amux = LINE1, }}, }, [SAA7134_BOARD_PINNACLE_PCTV_110i] = { .name = "Pinnacle PCTV 40i/50i/110i (saa7133)", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x080200000, .inputs = { { .name = name_tv, .vmux = 4, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE2, }, { .name = name_comp2, .vmux = 0, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_ASUSTeK_P7131_DUAL] = { .name = "ASUSTeK P7131 Dual", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 1 << 21, .mpeg = 
SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x0000000, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, .gpio = 0x0200000, },{ .name = name_comp2, .vmux = 0, .amux = LINE2, .gpio = 0x0200000, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x0200000, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_SEDNA_PC_TV_CARDBUS] = { /* Paul Tom Zalac <pzalac@gmail.com> */ /* Pavel Mihaylov <bin@bash.info> */ .name = "Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)", /* Sedna/MuchTV (OEM) Cardbus TV Tuner */ .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0xe880c0, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 6, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_ASUSTEK_DIGIMATRIX_TV] = { /* "Cyril Lacoux (Yack)" <clacoux@ifeelgood.org> */ .name = "ASUS Digimatrix TV", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FQ1216ME, .tda9887_conf = TDA9887_PRESENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_PHILIPS_TIGER] = { .name = "Philips Tiger reference design", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 0, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_MSI_TVATANYWHERE_PLUS] = 
{ .name = "MSI TV@Anywhere plus", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 1 << 21, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, /* unconfirmed, taken from Philips driver */ },{ .name = name_comp2, .vmux = 0, /* untested, Composite over S-Video */ .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_CINERGY250PCI] = { /* remote-control does not work. The signal about a key press comes in via gpio, but the key code doesn't. Neither does it have an i2c remote control interface. */ .name = "Terratec Cinergy 250 PCI TV", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x80200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_svideo, /* NOT tested */ .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_FLYDVB_TRIO] = { /* LifeView LR319 FlyDVB Trio */ /* Peter Missel <peter.missel@onlinehome.de> */ .name = "LifeView FlyDVB Trio", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00200000, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, /* Analog broadcast/cable TV */ .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ .tv = 1, },{ .name = name_svideo, /* S-Video signal on S-Video input */ .vmux = 8, .amux = LINE2, },{ .name = name_comp1, /* Composite signal on S-Video input */ .vmux = 0, .amux = LINE2, },{ .name = name_comp2, /* Composite input */ .vmux = 3, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, }, 
[SAA7134_BOARD_AVERMEDIA_777] = { .name = "AverTV DVB-T 777", .audio_clock = 0x00187de7, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_FLYDVBT_LR301] = { /* LifeView FlyDVB-T */ /* Giampiero Giancipoli <gianci@libero.it> */ .name = "LifeView FlyDVB-T / Genius VideoWonder DVB-T", .audio_clock = 0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_comp1, /* Composite input */ .vmux = 3, .amux = LINE2, },{ .name = name_svideo, /* S-Video signal on S-Video input */ .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331] = { .name = "ADS Instant TV Duo Cardbus PTV331", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */ .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x00200000, }}, }, [SAA7134_BOARD_TEVION_DVBT_220RF] = { .name = "Tevion/KWorld DVB-T 220RF", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 1 << 21, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_comp2, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_KWORLD_DVBT_210] = { .name = "KWorld DVB-T 210", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, 
.gpiomask = 1 << 21, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_KWORLD_ATSC110] = { .name = "Kworld ATSC110/115", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TUV1236D, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_AVERMEDIA_A169_B] = { /* AVerMedia A169 */ /* Rickard Osser <ricky@osser.se> */ /* This card has two saa7134 chips on it, but only one of them is currently working. */ .name = "AVerMedia A169 B", .audio_clock = 0x02187de7, .tuner_type = TUNER_LG_TALN, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x0a60000, }, [SAA7134_BOARD_AVERMEDIA_A169_B1] = { /* AVerMedia A169 */ /* Rickard Osser <ricky@osser.se> */ .name = "AVerMedia A169 B1", .audio_clock = 0x02187de7, .tuner_type = TUNER_LG_TALN, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0xca60000, .inputs = {{ .name = name_tv, .vmux = 4, .amux = TV, .tv = 1, .gpio = 0x04a61000, },{ .name = name_comp2, /* Composite SVIDEO (B/W if signal is carried with SVIDEO) */ .vmux = 1, .amux = LINE2, },{ .name = name_svideo, .vmux = 9, /* 9 is correct as S-VIDEO1 according to a169.inf! 
*/ .amux = LINE1, }}, }, [SAA7134_BOARD_MD7134_BRIDGE_2] = { /* The second saa7134 on this card only serves as DVB-S host bridge */ .name = "Medion 7134 Bridge #2", .audio_clock = 0x00187de7, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, }, [SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS] = { .name = "LifeView FlyDVB-T Hybrid Cardbus/MSI TV @nywhere A/D NB", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x00600000, /* Bit 21 0=Radio, Bit 22 0=TV */ .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x200000, /* GPIO21=High for TV input */ .tv = 1, },{ .name = name_svideo, /* S-Video signal on S-Video input */ .vmux = 8, .amux = LINE2, },{ .name = name_comp1, /* Composite signal on S-Video input */ .vmux = 0, .amux = LINE2, },{ .name = name_comp2, /* Composite input */ .vmux = 3, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */ }, }, [SAA7134_BOARD_FLYVIDEO3000_NTSC] = { /* "Zac Bowling" <zac@zacbowling.com> */ .name = "LifeView FlyVIDEO3000 (NTSC)", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_NTSC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0xe000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .gpio = 0x8000, .tv = 1, },{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .gpio = 0x0000, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, .gpio = 0x4000, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, .gpio = 0x4000, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x4000, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x2000, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x8000, }, }, [SAA7134_BOARD_MEDION_MD8800_QUADRO] = { .name = "Medion Md8800 Quadro", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, 
.radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_FLYDVBS_LR300] = { /* LifeView FlyDVB-s */ /* Igor M. Liplianin <liplianin@tut.by> */ .name = "LifeView FlyDVB-S /Acorp TV134DS", .audio_clock = 0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_comp1, /* Composite input */ .vmux = 3, .amux = LINE1, },{ .name = name_svideo, /* S-Video signal on S-Video input */ .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_PROTEUS_2309] = { .name = "Proteus Pro 2309", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_AVERMEDIA_A16AR] = { /* Petr Baudis <pasky@ucw.cz> */ .name = "AVerMedia TV Hybrid A16AR", .audio_clock = 0x187de7, .tuner_type = TUNER_PHILIPS_TD1316, /* untested */ .radio_type = TUNER_TEA5767, /* untested */ .tuner_addr = ADDR_UNSET, .radio_addr = 0x60, .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE1, }, }, [SAA7134_BOARD_ASUS_EUROPA2_HYBRID] = { .name = "Asus Europa2 OEM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = 
ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT| TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 4, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = LINE1, }, }, [SAA7134_BOARD_PINNACLE_PCTV_310i] = { .name = "Pinnacle PCTV 310i", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 1, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x000200000, .inputs = {{ .name = name_tv, .vmux = 4, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE2, },{ .name = name_comp2, .vmux = 0, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_AVERMEDIA_STUDIO_507] = { /* Mikhail Fedotov <mo_fedotov@mail.ru> */ .name = "Avermedia AVerTV Studio 507", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1256_IH3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x00, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, .gpio = 0x00, },{ .name = name_comp2, .vmux = 3, .amux = LINE2, .gpio = 0x00, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x00, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x01, }, .mute = { .name = name_mute, .amux = LINE1, .gpio = 0x00, }, }, [SAA7134_BOARD_VIDEOMATE_DVBT_200A] = { /* Francis Barber <fedora@barber-family.id.au> */ .name = "Compro Videomate DVB-T200A", .audio_clock = 0x00187de7, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = 
TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE2, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, }}, }, [SAA7134_BOARD_HAUPPAUGE_HVR1110] = { /* Thomas Genty <tomlohave@gmail.com> */ /* David Bentham <db260179@hotmail.com> */ .name = "Hauppauge WinTV-HVR1110 DVB-T/Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 1, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200100, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x0000100, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200100, }, }, [SAA7134_BOARD_HAUPPAUGE_HVR1150] = { .name = "Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 3, .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_SERIAL, .ts_force_val = 1, .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */ .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x0000100, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0800100, /* GPIO 23 HI for FM */ }, }, [SAA7134_BOARD_HAUPPAUGE_HVR1120] = { .name = "Hauppauge WinTV-HVR1120 DVB-T/Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 3, .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_SERIAL, .gpiomask = 0x0800100, /* GPIO 21 is an INPUT */ .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x0000100, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 
0x0800100, /* GPIO 23 HI for FM */ }, }, [SAA7134_BOARD_CINERGY_HT_PCMCIA] = { .name = "Terratec Cinergy HT PCMCIA", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 6, .amux = LINE1, }}, }, [SAA7134_BOARD_ENCORE_ENLTV] = { /* Steven Walter <stevenrwalter@gmail.com> Juan Pablo Sormani <sorman@gmail.com> */ .name = "Encore ENLTV", .audio_clock = 0x00200000, .tuner_type = TUNER_TNF_5335MF, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = 3, .tv = 1, },{ .name = name_tv_mono, .vmux = 7, .amux = 4, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = 2, },{ .name = name_svideo, .vmux = 0, .amux = 2, }}, .radio = { .name = name_radio, .amux = LINE2, /* .gpio = 0x00300001,*/ .gpio = 0x20000, }, .mute = { .name = name_mute, .amux = 0, }, }, [SAA7134_BOARD_ENCORE_ENLTV_FM] = { /* Juan Pablo Sormani <sorman@gmail.com> */ .name = "Encore ENLTV-FM", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FCV1236D, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = 3, .tv = 1, },{ .name = name_tv_mono, .vmux = 7, .amux = 4, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = 2, },{ .name = name_svideo, .vmux = 0, .amux = 2, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x20000, }, .mute = { .name = name_mute, .amux = 0, }, }, [SAA7134_BOARD_ENCORE_ENLTV_FM53] = { .name = "Encore ENLTV-FM v5.3", .audio_clock = 0x00200000, .tuner_type = TUNER_TNF_5335MF, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x7000, .inputs = { { .name = name_tv, .vmux = 1, .amux = 1, .tv = 1, .gpio = 0x50000, }, { .name = name_comp1, .vmux = 3, .amux = 2, 
.gpio = 0x2000, }, { .name = name_svideo, .vmux = 8, .amux = 2, .gpio = 0x2000, } }, .radio = { .name = name_radio, .vmux = 1, .amux = 1, }, .mute = { .name = name_mute, .gpio = 0xf000, .amux = 0, }, }, [SAA7134_BOARD_ENCORE_ENLTV_FM3] = { .name = "Encore ENLTV-FM 3", .audio_clock = 0x02187de7, .tuner_type = TUNER_TENA_TNF_5337, .radio_type = TUNER_TEA5767, .tuner_addr = 0x61, .radio_addr = 0x60, .inputs = { { .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .vmux = 1, .amux = LINE1, }, .mute = { .name = name_mute, .amux = LINE1, .gpio = 0x43000, }, }, [SAA7134_BOARD_CINERGY_HT_PCI] = { .name = "Terratec Cinergy HT PCI", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 6, .amux = LINE1, }}, }, [SAA7134_BOARD_PHILIPS_TIGER_S] = { .name = "Philips Tiger - S Reference design", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 2, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_AVERMEDIA_M102] = { .name = "Avermedia M102", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 1<<21, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 0, .amux = LINE2, },{ .name = name_svideo, .vmux = 6, .amux = 
LINE2, }}, }, [SAA7134_BOARD_ASUS_P7131_4871] = { .name = "ASUS P7131 4871", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 2, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x0200000, }}, }, [SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA] = { .name = "ASUSTeK P7131 Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 2, .gpiomask = 1 << 21, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x0000000, },{ .name = name_comp1, .vmux = 3, .amux = LINE2, .gpio = 0x0200000, },{ .name = name_comp2, .vmux = 0, .amux = LINE2, .gpio = 0x0200000, },{ .name = name_svideo, .vmux = 8, .amux = LINE2, .gpio = 0x0200000, }}, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_ASUSTeK_P7131_ANALOG] = { .name = "ASUSTeK P7131 Analog", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 1 << 21, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x0000000, }, { .name = name_comp1, .vmux = 3, .amux = LINE2, }, { .name = name_comp2, .vmux = 0, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_SABRENT_TV_PCB05] = { .name = "Sabrent PCMCIA TV-PCB05", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_comp2, .vmux = 0, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .mute = { .name = 
name_mute, .amux = TV, }, }, [SAA7134_BOARD_10MOONSTVMASTER3] = { /* Tony Wan <aloha_cn@hotmail.com> */ .name = "10MOONS TM300 TV Card", .audio_clock = 0x00200000, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x7000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = LINE2, .gpio = 0x0000, .tv = 1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x2000, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x2000, }}, .mute = { .name = name_mute, .amux = LINE2, .gpio = 0x3000, }, }, [SAA7134_BOARD_AVERMEDIA_SUPER_007] = { .name = "Avermedia Super 007", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 0, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, /* FIXME: analog tv untested */ .vmux = 1, .amux = TV, .tv = 1, }}, }, [SAA7134_BOARD_AVERMEDIA_M135A] = { .name = "Avermedia PCI pure analog (M135A)", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 2, .gpiomask = 0x020200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x00200000, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x01, }, }, [SAA7134_BOARD_AVERMEDIA_M733A] = { .name = "Avermedia PCI M733A", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 0, .gpiomask = 0x020200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x00200000, }, .mute = { .name = 
name_mute, .amux = TV, .gpio = 0x01, }, }, [SAA7134_BOARD_BEHOLD_401] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 401", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FQ1216ME, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }}, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_BEHOLD_403] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 403", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FQ1216ME, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_BEHOLD_403FM] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 403 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FQ1216ME, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_405] = { /* Beholder Intl. Ltd. 
2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 405", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }}, }, [SAA7134_BOARD_BEHOLD_405FM] = { /* Sergey <skiv@orel.ru> */ /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 405 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, },{ .name = name_comp1, .vmux = 3, .amux = LINE1, },{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_407] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 407", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0xc0c000, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, .gpio = 0xc0c000, },{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, .gpio = 0xc0c000, }}, }, [SAA7134_BOARD_BEHOLD_407FM] = { /* Beholder Intl. Ltd. 
2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 407 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0xc0c000, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, .gpio = 0xc0c000, },{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, .gpio = 0xc0c000, }}, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0xc0c000, }, }, [SAA7134_BOARD_BEHOLD_409] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 409", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, }, [SAA7134_BOARD_BEHOLD_505FM] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 505 FM", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .mute = { .name = name_mute, .amux = LINE1, }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_505RDS_MK5] = { /* Beholder Intl. Ltd. 
2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 505 RDS", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FM1216MK5, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .mute = { .name = name_mute, .amux = LINE1, }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_507_9FM] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 507 FM / BeholdTV 509 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_507RDS_MK5] = { /* Beholder Intl. Ltd. 2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 507 RDS", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216MK5, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_507RDS_MK3] = { /* Beholder Intl. Ltd. 
2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 507 RDS", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = { /* Beholder Intl. Ltd. 2008 */ /* Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV Columbus TV/FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_ALPS_TSBE5_PAL, .radio_type = TUNER_TEA5767, .tuner_addr = 0xc2 >> 1, .radio_addr = 0xc0 >> 1, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x000A8004, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, .gpio = 0x000A8004, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, .gpio = 0x000A8000, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x000A8000, } }, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x000A8000, }, }, [SAA7134_BOARD_BEHOLD_607FM_MK3] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 607 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_609FM_MK3] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 609 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ 
.name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_607FM_MK5] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 607 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216MK5, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_609FM_MK5] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 609 FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216MK5, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_607RDS_MK3] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 607 RDS", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_609RDS_MK3] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 609 RDS", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = 
ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_607RDS_MK5] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 607 RDS", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216MK5, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_609RDS_MK5] = { /* Andrey Melnikoff <temnota@kmv.ru> */ .name = "Beholder BeholdTV 609 RDS", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216MK5, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, },{ .name = name_comp1, .vmux = 1, .amux = LINE1, },{ .name = name_svideo, .vmux = 8, .amux = LINE1, }}, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_BEHOLD_M6] = { /* Igor Kuznetsov <igk@igk.ru> */ /* Andrey Melnikoff <temnota@kmv.ru> */ /* Beholder Intl. Ltd. 
Dmitry Belimov <d.belimov@gmail.com> */ /* Alexey Osipov <lion-simba@pridelands.ru> */ .name = "Beholder BeholdTV M6", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .tda9887_conf = TDA9887_PRESENT, .inputs = { { .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, .vid_port_opts = (SET_T_CODE_POLARITY_NON_INVERTED | SET_CLOCK_NOT_DELAYED | SET_CLOCK_INVERTED | SET_VSYNC_OFF), }, [SAA7134_BOARD_BEHOLD_M63] = { /* Igor Kuznetsov <igk@igk.ru> */ /* Andrey Melnikoff <temnota@kmv.ru> */ /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV M63", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .empress_addr = 0x20, .tda9887_conf = TDA9887_PRESENT, .inputs = { { .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, .vid_port_opts = (SET_T_CODE_POLARITY_NON_INVERTED | SET_CLOCK_NOT_DELAYED | SET_CLOCK_INVERTED | SET_VSYNC_OFF), }, [SAA7134_BOARD_BEHOLD_M6_EXTRA] = { /* Igor Kuznetsov <igk@igk.ru> */ /* Andrey Melnikoff <temnota@kmv.ru> */ /* Beholder Intl. Ltd. 
Dmitry Belimov <d.belimov@gmail.com> */ /* Alexey Osipov <lion-simba@pridelands.ru> */ .name = "Beholder BeholdTV M6 Extra", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216MK5, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .empress_addr = 0x20, .tda9887_conf = TDA9887_PRESENT, .inputs = { { .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, .mpeg = SAA7134_MPEG_EMPRESS, .video_out = CCIR656, .vid_port_opts = (SET_T_CODE_POLARITY_NON_INVERTED | SET_CLOCK_NOT_DELAYED | SET_CLOCK_INVERTED | SET_VSYNC_OFF), }, [SAA7134_BOARD_TWINHAN_DTV_DVB_3056] = { .name = "Twinhan Hybrid DTV-DVB 3056 PCI", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 2, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, /* untested */ .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_GENIUS_TVGO_A11MCE] = { /* Adrian Pardini <pardo.bsso@gmail.com> */ .name = "Genius TVGO AM11MCE", .audio_clock = 0x00200000, .tuner_type = TUNER_TNF_5335MF, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0xf000, .inputs = {{ .name = name_tv_mono, .vmux = 1, .amux = LINE2, .gpio = 0x0000, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x2000, .tv = 1 }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x2000, } }, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x1000, }, .mute = { .name = name_mute, .amux = LINE2, .gpio = 0x6000, }, }, [SAA7134_BOARD_PHILIPS_SNAKE] = { .name = "NXP Snake DVB-S reference design", .audio_clock = 
0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, }, [SAA7134_BOARD_CREATIX_CTX953] = { .name = "Medion/Creatix CTX953 Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 0, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 0, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, }, [SAA7134_BOARD_MSI_TVANYWHERE_AD11] = { .name = "MSI TV@nywhere A/D v1.1", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 2, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_AVERMEDIA_CARDBUS_506] = { .name = "AVerMedia Cardbus TV/Radio (E506R)", .audio_clock = 0x187de7, .tuner_type = TUNER_XC2028, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = TV, }, }, [SAA7134_BOARD_AVERMEDIA_A16D] = { .name = "AVerMedia Hybrid TV/Radio (A16D)", .audio_clock = 0x187de7, .tuner_type = TUNER_XC2028, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_svideo, .vmux = 8, 
.amux = LINE1, }, { .name = name_comp, .vmux = 0, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, }, }, [SAA7134_BOARD_AVERMEDIA_M115] = { .name = "Avermedia M115", .audio_clock = 0x187de7, .tuner_type = TUNER_XC2028, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, }, [SAA7134_BOARD_VIDEOMATE_T750] = { /* John Newbigin <jn@it.swin.edu.au> */ .name = "Compro VideoMate T750", .audio_clock = 0x00187de7, .tuner_type = TUNER_XC2028, .radio_type = UNSET, .tuner_addr = 0x61, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = TV, } }, [SAA7134_BOARD_AVERMEDIA_A700_PRO] = { /* Matthias Schwarzott <zzam@gentoo.org> */ .name = "Avermedia DVB-S Pro A700", .audio_clock = 0x00187de7, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { .name = name_comp, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 6, .amux = LINE1, } }, }, [SAA7134_BOARD_AVERMEDIA_A700_HYBRID] = { /* Matthias Schwarzott <zzam@gentoo.org> */ .name = "Avermedia DVB-S Hybrid+FM A700", .audio_clock = 0x00187de7, .tuner_type = TUNER_XC2028, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { .name = name_tv, .vmux = 4, .amux = TV, .tv = 1, }, { .name = name_comp, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 6, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, }, }, [SAA7134_BOARD_BEHOLD_H6] = { /* Igor Kuznetsov <igk@igk.ru> */ .name = "Beholder BeholdTV H6", .audio_clock = 0x00187de7, .tuner_type = 
TUNER_PHILIPS_FMD1216MEX_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_ASUSTeK_TIGER_3IN1] = { .name = "Asus Tiger 3in1", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 2, .gpiomask = 1 << 21, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp, .vmux = 0, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_REAL_ANGEL_220] = { .name = "Zogis Real Angel 220", .audio_clock = 0x00187de7, .tuner_type = TUNER_TNF_5335MF, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x801a8087, .inputs = { { .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, .gpio = 0x624000, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, .gpio = 0x624000, }, { .name = name_svideo, .vmux = 1, .amux = LINE1, .gpio = 0x624000, } }, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x624001, }, .mute = { .name = name_mute, .amux = TV, }, }, [SAA7134_BOARD_ADS_INSTANT_HDTV_PCI] = { .name = "ADS Tech Instant HDTV", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TUV1236D, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .mpeg = SAA7134_MPEG_DVB, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp, .vmux = 4, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, }, [SAA7134_BOARD_ASUSTeK_TIGER] = { .name = "Asus Tiger Rev:1.00", .audio_clock = 0x00187de7, 
.tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 0, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 0x0200000, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE2, }, { .name = name_comp2, .vmux = 0, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0200000, }, }, [SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG] = { .name = "Kworld Plus TV Analog Lite PCI", .audio_clock = 0x00187de7, .tuner_type = TUNER_YMEC_TVF_5533MF, .radio_type = TUNER_TEA5767, .tuner_addr = ADDR_UNSET, .radio_addr = 0x60, .gpiomask = 0x80000700, .inputs = { { .name = name_tv, .vmux = 1, .amux = LINE2, .tv = 1, .gpio = 0x100, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x200, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x200, } }, .radio = { .name = name_radio, .vmux = 1, .amux = LINE1, .gpio = 0x100, }, .mute = { .name = name_mute, .vmux = 8, .amux = 2, }, }, [SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG] = { .name = "Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .tuner_addr = ADDR_UNSET, .radio_type = UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x8e054000, .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, #if 0 /* FIXME */ }, { .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x200, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x200, #endif } }, #if 0 .radio = { .name = name_radio, .vmux = 1, .amux = LINE1, .gpio = 0x100, }, #endif .mute = { .name = name_mute, .vmux = 0, .amux = TV, }, }, [SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS] = { .name = "Avermedia AVerTV GO 007 FM Plus", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 
0x00300003, /* .gpiomask = 0x8c240003, */ .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x01, }, { .name = name_svideo, .vmux = 6, .amux = LINE1, .gpio = 0x02, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x00300001, }, .mute = { .name = name_mute, .amux = TV, .gpio = 0x01, }, }, [SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = { /* Andy Shevchenko <andy@smile.org.ua> */ .name = "Avermedia AVerTV Studio 507UA", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */ .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x03, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x00, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x00, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 0x00, } }, .radio = { .name = name_radio, .amux = LINE2, .gpio = 0x01, }, .mute = { .name = name_mute, .amux = LINE1, .gpio = 0x00, }, }, [SAA7134_BOARD_VIDEOMATE_S350] = { /* Jan D. Louw <jd.louw@mweb.co.za */ .name = "Compro VideoMate S350/S300", .audio_clock = 0x00187de7, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { .name = name_comp1, .vmux = 0, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, /* Not tested */ .amux = LINE1 } }, }, [SAA7134_BOARD_BEHOLD_X7] = { /* Beholder Intl. Ltd. 
Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV X7", .audio_clock = 0x00187de7, .tuner_type = TUNER_XC5000, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { .name = name_tv, .vmux = 2, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 0, .amux = LINE1, }, { .name = name_svideo, .vmux = 9, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, }, }, [SAA7134_BOARD_ZOLID_HYBRID_PCI] = { .name = "Zolid Hybrid TV Tuner PCI", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .tuner_config = 0, .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = {{ .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, } }, .radio = { /* untested */ .name = name_radio, .amux = TV, }, }, [SAA7134_BOARD_ASUS_EUROPA_HYBRID] = { .name = "Asus Europa Hybrid OEM", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TD1316, .radio_type = UNSET, .tuner_addr = 0x61, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = { { .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 4, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, }, [SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S] = { .name = "Leadtek Winfast DTV1000S", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .inputs = { { .name = name_comp1, .vmux = 3, }, { .name = name_svideo, .vmux = 8, } }, }, [SAA7134_BOARD_BEHOLD_505RDS_MK3] = { /* Beholder Intl. Ltd. 
2008 */ /*Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 505 RDS", .audio_clock = 0x00200000, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .rds_addr = 0x10, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x00008000, .inputs = {{ .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .mute = { .name = name_mute, .amux = LINE1, }, .radio = { .name = name_radio, .amux = LINE2, }, }, [SAA7134_BOARD_HAWELL_HW_404M7] = { /* Hawell HW-404M7 & Hawell HW-808M7 */ /* Bogoslovskiy Viktor <bogovic@bk.ru> */ .name = "Hawell HW-404M7", .audio_clock = 0x00200000, .tuner_type = UNSET, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x389c00, .inputs = {{ .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x01fc00, } }, }, [SAA7134_BOARD_BEHOLD_H7] = { /* Beholder Intl. Ltd. Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV H7", .audio_clock = 0x00187de7, .tuner_type = TUNER_XC5000, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { .name = name_tv, .vmux = 2, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 0, .amux = LINE1, }, { .name = name_svideo, .vmux = 9, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, }, }, [SAA7134_BOARD_BEHOLD_A7] = { /* Beholder Intl. Ltd. 
Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV A7", .audio_clock = 0x00187de7, .tuner_type = TUNER_XC5000, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = { { .name = name_tv, .vmux = 2, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 0, .amux = LINE1, }, { .name = name_svideo, .vmux = 9, .amux = LINE1, } }, .radio = { .name = name_radio, .amux = TV, }, }, [SAA7134_BOARD_TECHNOTREND_BUDGET_T3000] = { .name = "TechoTrend TT-budget T-3000", .tuner_type = TUNER_PHILIPS_TD1316, .audio_clock = 0x00187de7, .radio_type = UNSET, .tuner_addr = 0x63, .radio_addr = ADDR_UNSET, .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, .mpeg = SAA7134_MPEG_DVB, .inputs = {{ .name = name_tv, .vmux = 3, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 0, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, }, [SAA7134_BOARD_VIDEOMATE_M1F] = { /* Pavel Osnova <pvosnova@gmail.com> */ .name = "Compro VideoMate Vista M1F", .audio_clock = 0x00187de7, .tuner_type = TUNER_LG_PAL_NEW_TAPC, .radio_type = TUNER_TEA5767, .tuner_addr = ADDR_UNSET, .radio_addr = 0x60, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp1, .vmux = 3, .amux = LINE2, }, { .name = name_svideo, .vmux = 8, .amux = LINE2, } }, .radio = { .name = name_radio, .amux = LINE1, }, .mute = { .name = name_mute, .amux = TV, }, }, [SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2] = { /* Timothy Lee <timothy.lee@siriushk.com> */ .name = "MagicPro ProHDTV Pro2 DMB-TH/Hybrid", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_config = 3, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x02050000, .mpeg = SAA7134_MPEG_DVB, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, .gpio = 0x00050000, }, { .name = name_comp1, .vmux = 3, .amux = LINE1, .gpio = 0x00050000, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, .gpio = 
0x00050000, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x00050000, }, .mute = { .name = name_mute, .vmux = 0, .amux = TV, .gpio = 0x00050000, }, }, [SAA7134_BOARD_BEHOLD_501] = { /* Beholder Intl. Ltd. 2010 */ /* Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 501", .audio_clock = 0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = { { .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_BEHOLD_503FM] = { /* Beholder Intl. Ltd. 2010 */ /* Dmitry Belimov <d.belimov@gmail.com> */ .name = "Beholder BeholdTV 503 FM", .audio_clock = 0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .gpiomask = 0x00008000, .inputs = { { .name = name_tv, .vmux = 3, .amux = LINE2, .tv = 1, }, { .name = name_comp1, .vmux = 1, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, .mute = { .name = name_mute, .amux = LINE1, }, }, [SAA7134_BOARD_SENSORAY811_911] = { .name = "Sensoray 811/911", .audio_clock = 0x00200000, .tuner_type = TUNER_ABSENT, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .inputs = {{ .name = name_comp1, .vmux = 0, .amux = LINE1, }, { .name = name_comp3, .vmux = 2, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = LINE1, } }, }, [SAA7134_BOARD_KWORLD_PC150U] = { .name = "Kworld PC150-U", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_TDA8290, .radio_type = UNSET, .tuner_addr = ADDR_UNSET, .radio_addr = ADDR_UNSET, .mpeg = SAA7134_MPEG_DVB, .gpiomask = 1 << 21, .ts_type = SAA7134_MPEG_TS_PARALLEL, .inputs = { { .name = name_tv, .vmux = 1, .amux = TV, .tv = 1, }, { .name = name_comp, .vmux = 3, .amux = LINE1, }, { .name = name_svideo, .vmux = 8, .amux = 
LINE2, } }, .radio = { .name = name_radio, .amux = TV, .gpio = 0x0000000, }, }, }; /* end of saa7134_boards[] card-description table */

/* Number of entries in the saa7134_boards[] table above. */
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);

/* ------------------------------------------------------------------ */
/* PCI ids + subsystem IDs */

/*
 * PCI match table: each entry's (sub)vendor/(sub)device IDs identify a
 * known card, and .driver_data carries its index into saa7134_boards[]
 * (the SAA7134_BOARD_* constants are the designated-initializer indices
 * of that table).  NOTE(review): the table continues past this point.
 */
struct pci_device_id saa7134_pci_tbl[] = { {
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = PCI_VENDOR_ID_PHILIPS,
	.subdevice = 0x2001,
	.driver_data = SAA7134_BOARD_PROTEUS_PRO,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
	.subvendor = PCI_VENDOR_ID_PHILIPS,
	.subdevice = 0x2001,
	.driver_data = SAA7134_BOARD_PROTEUS_PRO,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = PCI_VENDOR_ID_PHILIPS,
	.subdevice = 0x6752,
	.driver_data = SAA7134_BOARD_EMPRESS,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = 0x1131,
	.subdevice = 0x4e85,
	.driver_data = SAA7134_BOARD_MONSTERTV,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = 0x153b,
	.subdevice = 0x1142,
	.driver_data = SAA7134_BOARD_CINERGY400,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = 0x153b,
	.subdevice = 0x1143,
	.driver_data = SAA7134_BOARD_CINERGY600,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = 0x153b,
	.subdevice = 0x1158,
	.driver_data = SAA7134_BOARD_CINERGY600_MK3,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
	.subvendor = 0x153b,
	.subdevice = 0x1162,
	.driver_data = SAA7134_BOARD_CINERGY400_CARDBUS,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = 0x5169,
	.subdevice = 0x0138,
	.driver_data = SAA7134_BOARD_FLYVIDEO3000_NTSC,
},{
	.vendor = PCI_VENDOR_ID_PHILIPS,
	.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
	.subvendor = 0x5168,
	.subdevice = 0x0138,
	.driver_data = SAA7134_BOARD_FLYVIDEO3000,
},{
	.vendor =
PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x4e42, /* "Typhoon PCI Capture TV Card" Art.No. 50673 */ .subdevice = 0x0138, .driver_data = SAA7134_BOARD_FLYVIDEO3000, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x5168, .subdevice = 0x0138, .driver_data = SAA7134_BOARD_FLYVIDEO2000, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x4e42, /* Typhoon */ .subdevice = 0x0138, /* LifeView FlyTV Prime30 OEM */ .driver_data = SAA7134_BOARD_FLYVIDEO2000, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, .subdevice = 0x0212, /* minipci, LR212 */ .driver_data = SAA7134_BOARD_FLYTVPLATINUM_MINI, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x14c0, .subdevice = 0x1212, /* minipci, LR1212 */ .driver_data = SAA7134_BOARD_FLYTVPLATINUM_MINI2, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x4e42, .subdevice = 0x0212, /* OEM minipci, LR212 */ .driver_data = SAA7134_BOARD_FLYTVPLATINUM_MINI, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, /* Animation Technologies (LifeView) */ .subdevice = 0x0214, /* Standard PCI, LR214 Rev E and earlier (SAA7135) */ .driver_data = SAA7134_BOARD_FLYTVPLATINUM_FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, /* Animation Technologies (LifeView) */ .subdevice = 0x5214, /* Standard PCI, LR214 Rev F onwards (SAA7131) */ .driver_data = SAA7134_BOARD_FLYTVPLATINUM_FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1489, /* KYE */ .subdevice = 0x0214, /* Genius VideoWonder ProTV */ .driver_data = SAA7134_BOARD_FLYTVPLATINUM_FM, /* is an LR214WF actually */ },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 
0x16be, .subdevice = 0x0003, .driver_data = SAA7134_BOARD_MD7134, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x16be, /* CTX946 analog TV, HW mpeg, DVB-T */ .subdevice = 0x5000, /* only analog TV and DVB-T for now */ .driver_data = SAA7134_BOARD_MD7134, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1048, .subdevice = 0x226b, .driver_data = SAA7134_BOARD_ELSA, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1048, .subdevice = 0x226a, .driver_data = SAA7134_BOARD_ELSA_500TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1048, .subdevice = 0x226c, .driver_data = SAA7134_BOARD_ELSA_700TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = PCI_VENDOR_ID_ASUSTEK, .subdevice = 0x4842, .driver_data = SAA7134_BOARD_ASUSTeK_TVFM7134, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = PCI_VENDOR_ID_ASUSTEK, .subdevice = 0x4845, .driver_data = SAA7134_BOARD_ASUSTeK_TVFM7135, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = PCI_VENDOR_ID_ASUSTEK, .subdevice = 0x4830, .driver_data = SAA7134_BOARD_ASUSTeK_TVFM7134, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = PCI_VENDOR_ID_ASUSTEK, .subdevice = 0x4843, .driver_data = SAA7134_BOARD_ASUSTEK_TVFM7133, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = PCI_VENDOR_ID_ASUSTEK, .subdevice = 0x4840, .driver_data = SAA7134_BOARD_ASUSTeK_TVFM7134, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0xfe01, .driver_data = SAA7134_BOARD_TVSTATION_RDS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1894, .subdevice = 0xfe01, 
.driver_data = SAA7134_BOARD_TVSTATION_RDS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1894, .subdevice = 0xa006, .driver_data = SAA7134_BOARD_TVSTATION_DVR, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1131, .subdevice = 0x7133, .driver_data = SAA7134_BOARD_VA1000POWER, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0x2001, .driver_data = SAA7134_BOARD_10MOONSTVMASTER, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x185b, .subdevice = 0xc100, .driver_data = SAA7134_BOARD_VIDEOMATE_TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x185b, .subdevice = 0xc100, .driver_data = SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = PCI_VENDOR_ID_MATROX, .subdevice = 0x48d0, .driver_data = SAA7134_BOARD_CRONOS_PLUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xa70b, .driver_data = SAA7134_BOARD_MD2819, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xa7a1, .driver_data = SAA7134_BOARD_AVERMEDIA_A700_PRO, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xa7a2, .driver_data = SAA7134_BOARD_AVERMEDIA_A700_HYBRID, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x2115, .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_305, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1461, /* Avermedia Technologies Inc */ 
.subdevice = 0xa115, .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_505, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x2108, .driver_data = SAA7134_BOARD_AVERMEDIA_305, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x10ff, .driver_data = SAA7134_BOARD_AVERMEDIA_DVD_EZMAKER, },{ /* AVerMedia CardBus */ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xd6ee, .driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS, },{ /* AVerMedia CardBus */ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xb7e9, .driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS_501, }, { /* TransGear 3000TV */ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x050c, .driver_data = SAA7134_BOARD_TG3000TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x11bd, .subdevice = 0x002b, .driver_data = SAA7134_BOARD_PINNACLE_PCTV_STEREO, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x11bd, .subdevice = 0x002d, .driver_data = SAA7134_BOARD_PINNACLE_300I_DVBT_PAL, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1019, .subdevice = 0x4cb4, .driver_data = SAA7134_BOARD_ECS_TVP3XP, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1019, .subdevice = 0x4cb5, .driver_data = SAA7134_BOARD_ECS_TVP3XP_4CB5, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1019, .subdevice = 0x4cb6, .driver_data = SAA7134_BOARD_ECS_TVP3XP_4CB6, },{ .vendor = 
PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x12ab, .subdevice = 0x0800, .driver_data = SAA7134_BOARD_UPMOST_PURPLE_TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x153b, .subdevice = 0x1152, .driver_data = SAA7134_BOARD_CINERGY200, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x185b, .subdevice = 0xc100, .driver_data = SAA7134_BOARD_VIDEOMATE_TV_PVR, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x9715, .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_307, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xa70a, .driver_data = SAA7134_BOARD_AVERMEDIA_307, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x185b, .subdevice = 0xc200, .driver_data = SAA7134_BOARD_VIDEOMATE_GOLD_PLUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1540, .subdevice = 0x9524, .driver_data = SAA7134_BOARD_PROVIDEO_PV952, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, .subdevice = 0x0502, /* Cardbus version */ .driver_data = SAA7134_BOARD_FLYDVBT_DUO_CARDBUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, .subdevice = 0x0306, /* PCI version */ .driver_data = SAA7134_BOARD_FLYDVBTDUO, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf31f, .driver_data = SAA7134_BOARD_AVERMEDIA_GO_007_FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf11d, .driver_data = SAA7134_BOARD_AVERMEDIA_M135A, }, { 
.vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x4155, .driver_data = SAA7134_BOARD_AVERMEDIA_M733A, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x4255, .driver_data = SAA7134_BOARD_AVERMEDIA_M733A, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0x2004, .driver_data = SAA7134_BOARD_PHILIPS_TOUGH, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1421, .subdevice = 0x0350, /* PCI version */ .driver_data = SAA7134_BOARD_ADS_INSTANT_TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1421, .subdevice = 0x0351, /* PCI version, new revision */ .driver_data = SAA7134_BOARD_ADS_INSTANT_TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1421, .subdevice = 0x0370, /* cardbus version */ .driver_data = SAA7134_BOARD_ADS_INSTANT_TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1421, .subdevice = 0x1370, /* cardbus version */ .driver_data = SAA7134_BOARD_ADS_INSTANT_TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x4e42, /* Typhoon */ .subdevice = 0x0502, /* LifeView LR502 OEM */ .driver_data = SAA7134_BOARD_FLYDVBT_DUO_CARDBUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1043, .subdevice = 0x0210, /* mini pci NTSC version */ .driver_data = SAA7134_BOARD_FLYTV_DIGIMATRIX, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1043, .subdevice = 0x0210, /* mini pci PAL/SECAM version */ .driver_data = SAA7134_BOARD_ASUSTEK_DIGIMATRIX_TV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = 
PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0000, /* It shouldn't break anything, since subdevice id seems unique */ .subdevice = 0x4091, .driver_data = SAA7134_BOARD_BEHOLD_409FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5456, /* GoTView */ .subdevice = 0x7135, .driver_data = SAA7134_BOARD_GOTVIEW_7135, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0x2004, .driver_data = SAA7134_BOARD_PHILIPS_EUROPA, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x185b, .subdevice = 0xc900, .driver_data = SAA7134_BOARD_VIDEOMATE_DVBT_300, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x185b, .subdevice = 0xc901, .driver_data = SAA7134_BOARD_VIDEOMATE_DVBT_200, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1435, .subdevice = 0x7350, .driver_data = SAA7134_BOARD_RTD_VFG7350, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1435, .subdevice = 0x7330, .driver_data = SAA7134_BOARD_RTD_VFG7330, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, .subdevice = 0x1044, .driver_data = SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1131, .subdevice = 0x4ee9, .driver_data = SAA7134_BOARD_MONSTERTV_MOBILE, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x11bd, .subdevice = 0x002e, .driver_data = SAA7134_BOARD_PINNACLE_PCTV_110i, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1043, .subdevice = 0x4862, .driver_data = SAA7134_BOARD_ASUSTeK_P7131_DUAL, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = PCI_VENDOR_ID_PHILIPS, 
.subdevice = 0x2018, .driver_data = SAA7134_BOARD_PHILIPS_TIGER, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1462, .subdevice = 0x6231, /* tda8275a, ks003 IR */ .driver_data = SAA7134_BOARD_MSI_TVATANYWHERE_PLUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1462, .subdevice = 0x8624, /* tda8275, ks003 IR */ .driver_data = SAA7134_BOARD_MSI_TVATANYWHERE_PLUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x153b, .subdevice = 0x1160, .driver_data = SAA7134_BOARD_CINERGY250PCI, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, /* SAA 7131E */ .subvendor = 0x5168, .subdevice = 0x0319, .driver_data = SAA7134_BOARD_FLYDVB_TRIO, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, .subdevice = 0x2c05, .driver_data = SAA7134_BOARD_AVERMEDIA_777, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x5168, .subdevice = 0x0301, .driver_data = SAA7134_BOARD_FLYDVBT_LR301, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0331, .subdevice = 0x1421, .driver_data = SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x17de, .subdevice = 0x7201, .driver_data = SAA7134_BOARD_TEVION_DVBT_220RF, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x17de, .subdevice = 0x7250, .driver_data = SAA7134_BOARD_KWORLD_DVBT_210, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, /* SAA7135HL */ .subvendor = 0x17de, .subdevice = 0x7350, .driver_data = SAA7134_BOARD_KWORLD_ATSC110, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, /* SAA7135HL */ .subvendor = 0x17de, .subdevice = 0x7352, .driver_data = SAA7134_BOARD_KWORLD_ATSC110, /* 
ATSC 115 */ },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, /* SAA7135HL */ .subvendor = 0x17de, .subdevice = 0xa134, .driver_data = SAA7134_BOARD_KWORLD_PC150U, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, .subdevice = 0x7360, .driver_data = SAA7134_BOARD_AVERMEDIA_A169_B, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, .subdevice = 0x6360, .driver_data = SAA7134_BOARD_AVERMEDIA_A169_B1, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x16be, .subdevice = 0x0005, .driver_data = SAA7134_BOARD_MD7134_BRIDGE_2, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x5168, .subdevice = 0x0300, .driver_data = SAA7134_BOARD_FLYDVBS_LR300, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x4e42, .subdevice = 0x0300,/* LR300 */ .driver_data = SAA7134_BOARD_FLYDVBS_LR300, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1489, .subdevice = 0x0301, .driver_data = SAA7134_BOARD_FLYDVBT_LR301, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, /* Animation Technologies (LifeView) */ .subdevice = 0x0304, .driver_data = SAA7134_BOARD_FLYTVPLATINUM_FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, .subdevice = 0x3306, .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, .subdevice = 0x3502, /* whats the difference to 0x3306 ?*/ .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, .subdevice = 0x3307, /* FlyDVB-T Hybrid Mini PCI */ .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS, }, { 
.vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x16be, .subdevice = 0x0007, .driver_data = SAA7134_BOARD_MEDION_MD8800_QUADRO, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x16be, .subdevice = 0x0008, .driver_data = SAA7134_BOARD_MEDION_MD8800_QUADRO, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x16be, .subdevice = 0x000d, /* triple CTX948_V1.1.1 */ .driver_data = SAA7134_BOARD_MEDION_MD8800_QUADRO, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, .subdevice = 0x2c05, .driver_data = SAA7134_BOARD_AVERMEDIA_777, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1489, .subdevice = 0x0502, /* Cardbus version */ .driver_data = SAA7134_BOARD_FLYDVBT_DUO_CARDBUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x0919, /* Philips Proteus PRO 2309 */ .subdevice = 0x2003, .driver_data = SAA7134_BOARD_PROTEUS_2309, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, .subdevice = 0x2c00, .driver_data = SAA7134_BOARD_AVERMEDIA_A16AR, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1043, .subdevice = 0x4860, .driver_data = SAA7134_BOARD_ASUS_EUROPA2_HYBRID, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x11bd, .subdevice = 0x002f, .driver_data = SAA7134_BOARD_PINNACLE_PCTV_310i, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0x9715, .driver_data = SAA7134_BOARD_AVERMEDIA_STUDIO_507, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xa11b, .driver_data = 
SAA7134_BOARD_AVERMEDIA_STUDIO_507UA, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1043, .subdevice = 0x4876, .driver_data = SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6700, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6701, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6702, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6703, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6704, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6705, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1110, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6706, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6707, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6708, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1150, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, .subdevice = 0x6709, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0070, 
.subdevice = 0x670a, .driver_data = SAA7134_BOARD_HAUPPAUGE_HVR1120, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x153b, .subdevice = 0x1172, .driver_data = SAA7134_BOARD_CINERGY_HT_PCMCIA, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0x2342, .driver_data = SAA7134_BOARD_ENCORE_ENLTV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1131, .subdevice = 0x2341, .driver_data = SAA7134_BOARD_ENCORE_ENLTV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x3016, .subdevice = 0x2344, .driver_data = SAA7134_BOARD_ENCORE_ENLTV, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1131, .subdevice = 0x230f, .driver_data = SAA7134_BOARD_ENCORE_ENLTV_FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x1a7f, .subdevice = 0x2008, .driver_data = SAA7134_BOARD_ENCORE_ENLTV_FM53, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1a7f, .subdevice = 0x2108, .driver_data = SAA7134_BOARD_ENCORE_ENLTV_FM3, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x153b, .subdevice = 0x1175, .driver_data = SAA7134_BOARD_CINERGY_HT_PCI, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf31e, .driver_data = SAA7134_BOARD_AVERMEDIA_M102, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x4E42, /* MSI */ .subdevice = 0x0306, /* TV@nywhere DUO */ .driver_data = SAA7134_BOARD_FLYDVBTDUO, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1043, .subdevice = 0x4871, .driver_data = SAA7134_BOARD_ASUS_P7131_4871, },{ .vendor = 
PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1043, .subdevice = 0x4857, /* REV:1.00 */ .driver_data = SAA7134_BOARD_ASUSTeK_TIGER, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x0919, /* SinoVideo PCI 2309 Proteus (7134) */ .subdevice = 0x2003, /* OEM cardbus */ .driver_data = SAA7134_BOARD_SABRENT_TV_PCB05, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0x2304, .driver_data = SAA7134_BOARD_10MOONSTVMASTER3, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf01d, /* AVerTV DVB-T Super 007 */ .driver_data = SAA7134_BOARD_AVERMEDIA_SUPER_007, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x0000, .subdevice = 0x4016, .driver_data = SAA7134_BOARD_BEHOLD_401, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x0000, .subdevice = 0x4036, .driver_data = SAA7134_BOARD_BEHOLD_403, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x0000, .subdevice = 0x4037, .driver_data = SAA7134_BOARD_BEHOLD_403FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x0000, .subdevice = 0x4050, .driver_data = SAA7134_BOARD_BEHOLD_405, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x0000, .subdevice = 0x4051, .driver_data = SAA7134_BOARD_BEHOLD_405FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x0000, .subdevice = 0x4070, .driver_data = SAA7134_BOARD_BEHOLD_407, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x0000, .subdevice = 0x4071, .driver_data = SAA7134_BOARD_BEHOLD_407FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = 
PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0000, .subdevice = 0x4090, .driver_data = SAA7134_BOARD_BEHOLD_409, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x0000, .subdevice = 0x505B, .driver_data = SAA7134_BOARD_BEHOLD_505RDS_MK5, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x0000, .subdevice = 0x5051, .driver_data = SAA7134_BOARD_BEHOLD_505RDS_MK3, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x5ace, .subdevice = 0x5050, .driver_data = SAA7134_BOARD_BEHOLD_505FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0000, .subdevice = 0x5071, .driver_data = SAA7134_BOARD_BEHOLD_507RDS_MK3, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0000, .subdevice = 0x507B, .driver_data = SAA7134_BOARD_BEHOLD_507RDS_MK5, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x5ace, .subdevice = 0x5070, .driver_data = SAA7134_BOARD_BEHOLD_507_9FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x5090, .driver_data = SAA7134_BOARD_BEHOLD_507_9FM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x0000, .subdevice = 0x5201, .driver_data = SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x5ace, .subdevice = 0x6070, .driver_data = SAA7134_BOARD_BEHOLD_607FM_MK3, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x5ace, .subdevice = 0x6071, .driver_data = SAA7134_BOARD_BEHOLD_607FM_MK5, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x5ace, .subdevice = 0x6072, .driver_data = SAA7134_BOARD_BEHOLD_607RDS_MK3, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = 
PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x5ace, .subdevice = 0x6073, .driver_data = SAA7134_BOARD_BEHOLD_607RDS_MK5, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6090, .driver_data = SAA7134_BOARD_BEHOLD_609FM_MK3, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6091, .driver_data = SAA7134_BOARD_BEHOLD_609FM_MK5, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6092, .driver_data = SAA7134_BOARD_BEHOLD_609RDS_MK3, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6093, .driver_data = SAA7134_BOARD_BEHOLD_609RDS_MK5, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6190, .driver_data = SAA7134_BOARD_BEHOLD_M6, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6193, .driver_data = SAA7134_BOARD_BEHOLD_M6_EXTRA, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6191, .driver_data = SAA7134_BOARD_BEHOLD_M63, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x4e42, .subdevice = 0x3502, .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1822, /*Twinhan Technology Co. 
Ltd*/ .subdevice = 0x0022, .driver_data = SAA7134_BOARD_TWINHAN_DTV_DVB_3056, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x16be, .subdevice = 0x0010, /* Medion version CTX953_V.1.4.3 */ .driver_data = SAA7134_BOARD_CREATIX_CTX953, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1462, /* MSI */ .subdevice = 0x8625, /* TV@nywhere A/D v1.1 */ .driver_data = SAA7134_BOARD_MSI_TVANYWHERE_AD11, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf436, .driver_data = SAA7134_BOARD_AVERMEDIA_CARDBUS_506, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf936, .driver_data = SAA7134_BOARD_AVERMEDIA_A16D, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xa836, .driver_data = SAA7134_BOARD_AVERMEDIA_M115, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x185b, .subdevice = 0xc900, .driver_data = SAA7134_BOARD_VIDEOMATE_T750, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, /* SAA7135HL */ .subvendor = 0x1421, .subdevice = 0x0380, .driver_data = SAA7134_BOARD_ADS_INSTANT_HDTV_PCI, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5169, .subdevice = 0x1502, .driver_data = SAA7134_BOARD_FLYTVPLATINUM_MINI, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x6290, .driver_data = SAA7134_BOARD_BEHOLD_H6, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf636, .driver_data = SAA7134_BOARD_AVERMEDIA_M103, }, { .vendor = 
PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf736, .driver_data = SAA7134_BOARD_AVERMEDIA_M103, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1043, .subdevice = 0x4878, /* REV:1.02G */ .driver_data = SAA7134_BOARD_ASUSTeK_TIGER_3IN1, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x17de, .subdevice = 0x7128, .driver_data = SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x17de, .subdevice = 0xb136, .driver_data = SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x1461, /* Avermedia Technologies Inc */ .subdevice = 0xf31d, .driver_data = SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x185b, .subdevice = 0xc900, .driver_data = SAA7134_BOARD_VIDEOMATE_S350, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, /* Beholder Intl. Ltd. 
*/ .subdevice = 0x7595, .driver_data = SAA7134_BOARD_BEHOLD_X7, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x19d1, /* RoverMedia */ .subdevice = 0x0138, /* LifeView FlyTV Prime30 OEM */ .driver_data = SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0x2004, .driver_data = SAA7134_BOARD_ZOLID_HYBRID_PCI, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x1043, .subdevice = 0x4847, .driver_data = SAA7134_BOARD_ASUS_EUROPA_HYBRID, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x107d, .subdevice = 0x6655, .driver_data = SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x13c2, .subdevice = 0x2804, .driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ .subdevice = 0x7190, .driver_data = SAA7134_BOARD_BEHOLD_H7, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, /* Beholder Intl. Ltd. 
*/ .subdevice = 0x7090, .driver_data = SAA7134_BOARD_BEHOLD_A7, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7135, .subvendor = 0x185b, .subdevice = 0xc900, .driver_data = SAA7134_BOARD_VIDEOMATE_M1F, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5ace, .subdevice = 0x5030, .driver_data = SAA7134_BOARD_BEHOLD_503FM, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = 0x5ace, .subdevice = 0x5010, .driver_data = SAA7134_BOARD_BEHOLD_501, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = 0x17de, .subdevice = 0xd136, .driver_data = SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x6000, .subdevice = 0x0811, .driver_data = SAA7134_BOARD_SENSORAY811_911, }, { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x6000, .subdevice = 0x0911, .driver_data = SAA7134_BOARD_SENSORAY811_911, }, { /* --- boards without eeprom + subsystem ID --- */ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0, .driver_data = SAA7134_BOARD_NOAUTO, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = PCI_VENDOR_ID_PHILIPS, .subdevice = 0, .driver_data = SAA7134_BOARD_NOAUTO, },{ /* --- default catch --- */ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7130, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = SAA7134_BOARD_UNKNOWN, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = SAA7134_BOARD_UNKNOWN, },{ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = SAA7134_BOARD_UNKNOWN, },{ .vendor = 
PCI_VENDOR_ID_PHILIPS,
	/* catch-all entry: any SAA7135 not matched above */
	.device       = PCI_DEVICE_ID_PHILIPS_SAA7135,
	.subvendor    = PCI_ANY_ID,
	.subdevice    = PCI_ANY_ID,
	.driver_data  = SAA7134_BOARD_UNKNOWN,
},{
	/* --- end of list --- */
}
};
MODULE_DEVICE_TABLE(pci, saa7134_pci_tbl);

/* ----------------------------------------------------------- */
/* flyvideo tweaks */

/*
 * Warn the user that FlyVideo boards come with several different tuners
 * behind the same IDs, so the automatically chosen tuner type may be
 * wrong and can be overridden with the tuner= module option.
 */
static void board_flyvideo(struct saa7134_dev *dev)
{
	printk("%s: there are different flyvideo cards with different tuners\n"
	       "%s: out there, you might have to use the tuner=<nr> insmod\n"
	       "%s: option to override the default value.\n",
	       dev->name, dev->name, dev->name);
}

/*
 * Tuner callback for boards fitted with an XC2028/XC3028 tuner.
 *
 * Only XC2028_TUNER_RESET is handled: the shared GPIO line is toggled
 * low/high via GPSTATUS0, then the tuner reset pin (board-specific
 * GPIO number) is pulsed low for 10 ms.  Returns 0 when the command
 * was handled, -EINVAL for any other command.
 */
static int saa7134_xc2028_callback(struct saa7134_dev *dev,
				   int command, int arg)
{
	switch (command) {
	case XC2028_TUNER_RESET:
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00008000, 0x00000000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00008000, 0x00008000);
		switch (dev->board) {
		case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
		case SAA7134_BOARD_AVERMEDIA_M103:
			saa7134_set_gpio(dev, 23, 0);
			msleep(10);
			saa7134_set_gpio(dev, 23, 1);
			break;
		case SAA7134_BOARD_AVERMEDIA_A16D:
			saa7134_set_gpio(dev, 21, 0);
			msleep(10);
			saa7134_set_gpio(dev, 21, 1);
			break;
		case SAA7134_BOARD_AVERMEDIA_A700_HYBRID:
			saa7134_set_gpio(dev, 18, 0);
			msleep(10);
			saa7134_set_gpio(dev, 18, 1);
			break;
		case SAA7134_BOARD_VIDEOMATE_T750:
			saa7134_set_gpio(dev, 20, 0);
			msleep(10);
			saa7134_set_gpio(dev, 20, 1);
			break;
		}
		return 0;
	}
	return -EINVAL;
}

/*
 * Tuner callback for boards fitted with an XC5000 tuner.
 *
 * On Beholder X7/H7/A7 a XC5000_TUNER_RESET request is served by
 * pulsing the SPECIAL_MODE register low/high (10 ms each); on all
 * other boards a fixed set of GPIO and audio clock registers is
 * programmed instead.  Always returns 0.
 */
static int saa7134_xc5000_callback(struct saa7134_dev *dev,
				   int command, int arg)
{
	switch (dev->board) {
	case SAA7134_BOARD_BEHOLD_X7:
	case SAA7134_BOARD_BEHOLD_H7:
	case SAA7134_BOARD_BEHOLD_A7:
		if (command == XC5000_TUNER_RESET) {
			/* pull the peripheral RESET pin down and up again
			 * to reset all chips */
			saa_writeb(SAA7134_SPECIAL_MODE, 0x00);
			msleep(10);
			saa_writeb(SAA7134_SPECIAL_MODE, 0x01);
			msleep(10);
		}
		break;
	default:
		/* board-specific GPIO / audio clock setup -- magic values,
		 * presumably from the vendor driver (not verifiable here) */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x06e20000, 0x06e20000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x06a20000, 0x06a20000);
		saa_andorl(SAA7133_ANALOG_IO_SELECT >> 2, 0x02, 0x02);
		saa_andorl(SAA7134_ANALOG_IN_CTRL1 >> 2, 0x81, 0x81);
		saa_andorl(SAA7134_AUDIO_CLOCK0 >> 2, 0x03187de7, 0x03187de7);
		saa_andorl(SAA7134_AUDIO_PLL_CTRL >> 2, 0x03, 0x03);
		saa_andorl(SAA7134_AUDIO_CLOCKS_PER_FIELD0 >> 2,
			   0x0001e000, 0x0001e000);
		break;
	}
	return 0;
}

/*
 * Callback for the tda8290 + tda827x tuner combination.
 *
 * command 0 switches the LNA gain, command 1 programs a vertical sync
 * pulse (width depends on the 50/60 Hz argument) on GPIO 22.
 * Returns 0 on success, -EINVAL on an unknown command.
 */
static int saa7134_tda8290_827x_callback(struct saa7134_dev *dev,
					 int command, int arg)
{
	u8 sync_control;

	switch (command) {
	case 0: /* switch LNA gain through GPIO 22*/
		saa7134_set_gpio(dev, 22, arg);
		break;
	case 1: /* vsync output at GPIO22. 50 / 60Hz */
		saa_andorb(SAA7134_VIDEO_PORT_CTRL3, 0x80, 0x80);
		saa_andorb(SAA7134_VIDEO_PORT_CTRL6, 0x0f, 0x03);
		if (arg == 1)
			sync_control = 11;
		else
			sync_control = 17;
		saa_writeb(SAA7134_VGATE_START, sync_control);
		saa_writeb(SAA7134_VGATE_STOP, sync_control + 1);
		saa_andorb(SAA7134_MISC_VGATE_MSB, 0x03, 0x00);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Switch the tda18271 AGC line on HVR11x0-style boards:
 * GPIO 26 low for analog mode, high for digital mode.
 */
static inline int saa7134_tda18271_hvr11x0_toggle_agc(struct saa7134_dev *dev,
						      enum tda18271_mode mode)
{
	/* toggle AGC switch through GPIO 26 */
	switch (mode) {
	case TDA18271_ANALOG:
		saa7134_set_gpio(dev, 26, 0);
		break;
	case TDA18271_DIGITAL:
		saa7134_set_gpio(dev, 26, 1);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Switch the tda18271 AGC line on the KWorld SBTVD board by writing
 * mode-specific GPIO mode/status values (with settle delays).
 */
static inline int saa7134_kworld_sbtvd_toggle_agc(struct saa7134_dev *dev,
						  enum tda18271_mode mode)
{
	/* toggle AGC switch through GPIO 27 */
	switch (mode) {
	case TDA18271_ANALOG:
		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
		msleep(20);
		break;
	case TDA18271_DIGITAL:
		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
		msleep(20);
		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
		msleep(30);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Switch the tda18271 AGC line on the KWorld PC150-U:
 * GPIO 18 low for analog, high for digital (30 ms settle).
 */
static int saa7134_kworld_pc150u_toggle_agc(struct saa7134_dev *dev,
					    enum tda18271_mode mode)
{
	switch (mode) {
	case TDA18271_ANALOG:
		saa7134_set_gpio(dev, 18, 0);
		break;
	case TDA18271_DIGITAL:
		saa7134_set_gpio(dev, 18, 1);
		msleep(30);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Callback for the tda8290 + tda18271 tuner combination.
 * Only TDA18271_CALLBACK_CMD_AGC_ENABLE is handled; it is dispatched
 * to the board-specific AGC toggle helper above.
 */
static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
					  int command, int arg)
{
	int ret = 0;

	switch (command) {
	case TDA18271_CALLBACK_CMD_AGC_ENABLE: /* 0 */
		switch (dev->board) {
		case SAA7134_BOARD_HAUPPAUGE_HVR1150:
		case SAA7134_BOARD_HAUPPAUGE_HVR1120:
		case SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2:
			ret = saa7134_tda18271_hvr11x0_toggle_agc(dev, arg);
			break;
		case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
			ret = saa7134_kworld_sbtvd_toggle_agc(dev, arg);
			break;
		case SAA7134_BOARD_KWORLD_PC150U:
			ret = saa7134_kworld_pc150u_toggle_agc(dev, arg);
			break;
		default:
			break;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/*
 * Dispatch a tda8290 tuner callback to the handler matching the
 * board's tuner companion chip (tda18271 vs. tda827x).
 */
static int saa7134_tda8290_callback(struct saa7134_dev *dev,
				    int command, int arg)
{
	int ret;

	switch (dev->board) {
	case SAA7134_BOARD_HAUPPAUGE_HVR1150:
	case SAA7134_BOARD_HAUPPAUGE_HVR1120:
	case SAA7134_BOARD_AVERMEDIA_M733A:
	case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
	case SAA7134_BOARD_KWORLD_PC150U:
	case SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2:
		/* tda8290 + tda18271 */
		ret = saa7134_tda8290_18271_callback(dev, command, arg);
		break;
	default:
		/* tda8290 + tda827x */
		ret = saa7134_tda8290_827x_callback(dev, command, arg);
		break;
	}
	return ret;
}

/*
 * Generic tuner callback entry point handed to the tuner drivers.
 * @priv is the saa7134_dev; the request is routed by tuner type.
 * Returns the handler's result, or -EINVAL for an unknown tuner type
 * or a NULL device.
 */
int saa7134_tuner_callback(void *priv, int component, int command, int arg)
{
	struct saa7134_dev *dev = priv;

	if (dev != NULL) {
		switch (dev->tuner_type) {
		case TUNER_PHILIPS_TDA8290:
			return saa7134_tda8290_callback(dev, command, arg);
		case TUNER_XC2028:
			return saa7134_xc2028_callback(dev, command, arg);
		case TUNER_XC5000:
			return saa7134_xc5000_callback(dev, command, arg);
		}
	} else {
		printk(KERN_ERR "saa7134: Error - device struct undefined.\n");
		return -EINVAL;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(saa7134_tuner_callback);

/* ----------------------------------------------------------- */

/*
 * Decode the Hauppauge analog eeprom block and log the model number;
 * warn (but continue) if the model is not in the known list.
 */
static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data)
{
	struct tveeprom tv;

	tveeprom_hauppauge_analog(&dev->i2c_client, &tv, eeprom_data);

	/* Make sure we support the board model */
	switch (tv.model) {
	case 67019: /* WinTV-HVR1110 (Retail, IR Blaster, hybrid, FM, SVid/Comp, 3.5mm audio in) */
	case 67109: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */
	case 67201: /* WinTV-HVR1150 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */
	case 67301: /* WinTV-HVR1000 (Retail, IR Receive, analog, no FM, SVid/Comp, 3.5mm audio in) */
	case 67209: /* WinTV-HVR1110 (Retail, IR Receive, hybrid, FM, SVid/Comp, 3.5mm audio in) */
	case 67559: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
	case 67569: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM) */
	case 67579: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM) */
	case 67589: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */
	case 67599: /* WinTV-HVR1110 (OEM, no IR, hybrid, no FM, SVid/Comp, RCA aud) */
	case 67651: /* WinTV-HVR1150 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
	case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
		break;
	default:
		printk(KERN_WARNING "%s: warning: "
		       "unknown hauppauge model #%d\n", dev->name, tv.model);
		break;
	}

	printk(KERN_INFO "%s: hauppauge eeprom: model=%d\n",
	       dev->name, tv.model);
}

/* ----------------------------------------------------------- */

/*
 * First-stage board init, run before i2c is available: records the
 * GPIO input state, flags boards with a GPIO/i2c remote, and performs
 * board-specific GPIO power-up / reset sequences.  Always returns 0.
 */
int saa7134_board_init1(struct saa7134_dev *dev)
{
	/* Always print gpio, often manufacturers encode tuner type
	   and other info. */
	saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0);
	dev->gpio_value = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2);
	printk(KERN_INFO "%s: board init: gpio is %x\n",
	       dev->name, dev->gpio_value);

	switch (dev->board) {
	case SAA7134_BOARD_FLYVIDEO2000:
	case SAA7134_BOARD_FLYVIDEO3000:
	case SAA7134_BOARD_FLYVIDEO3000_NTSC:
		dev->has_remote = SAA7134_REMOTE_GPIO;
		board_flyvideo(dev);
		break;
	/* boards whose only init1 requirement is a GPIO-based remote */
	case SAA7134_BOARD_FLYTVPLATINUM_MINI2:
	case SAA7134_BOARD_FLYTVPLATINUM_FM:
	case SAA7134_BOARD_CINERGY400:
	case SAA7134_BOARD_CINERGY600:
	case SAA7134_BOARD_CINERGY600_MK3:
	case SAA7134_BOARD_ECS_TVP3XP:
	case SAA7134_BOARD_ECS_TVP3XP_4CB5:
	case SAA7134_BOARD_ECS_TVP3XP_4CB6:
	case SAA7134_BOARD_MD2819:
	case SAA7134_BOARD_KWORLD_VSTREAM_XPERT:
	case SAA7134_BOARD_KWORLD_XPERT:
	case SAA7134_BOARD_AVERMEDIA_STUDIO_305:
	case SAA7134_BOARD_AVERMEDIA_STUDIO_505:
	case SAA7134_BOARD_AVERMEDIA_305:
	case SAA7134_BOARD_AVERMEDIA_STUDIO_307:
	case SAA7134_BOARD_AVERMEDIA_307:
	case SAA7134_BOARD_AVERMEDIA_STUDIO_507:
	case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
	case SAA7134_BOARD_AVERMEDIA_777:
	case SAA7134_BOARD_AVERMEDIA_M135A:
/*      case SAA7134_BOARD_SABRENT_SBTTVFM:  */ /* not finished yet */
	case SAA7134_BOARD_VIDEOMATE_TV_PVR:
	case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
	case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
	case SAA7134_BOARD_VIDEOMATE_M1F:
	case SAA7134_BOARD_VIDEOMATE_DVBT_300:
	case SAA7134_BOARD_VIDEOMATE_DVBT_200:
	case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
	case SAA7134_BOARD_MANLI_MTV001:
	case SAA7134_BOARD_MANLI_MTV002:
	case SAA7134_BOARD_BEHOLD_409FM:
	case SAA7134_BOARD_AVACSSMARTTV:
	case SAA7134_BOARD_GOTVIEW_7135:
	case SAA7134_BOARD_KWORLD_TERMINATOR:
	case SAA7134_BOARD_SEDNA_PC_TV_CARDBUS:
	case SAA7134_BOARD_FLYDVBT_LR301:
	case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
	case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
	case SAA7134_BOARD_ASUSTeK_P7131_ANALOG:
	case SAA7134_BOARD_FLYDVBTDUO:
	case SAA7134_BOARD_PROTEUS_2309:
	case SAA7134_BOARD_AVERMEDIA_A16AR:
	case SAA7134_BOARD_ENCORE_ENLTV:
	case SAA7134_BOARD_ENCORE_ENLTV_FM:
	case SAA7134_BOARD_ENCORE_ENLTV_FM53:
	case SAA7134_BOARD_ENCORE_ENLTV_FM3:
	case SAA7134_BOARD_10MOONSTVMASTER3:
	case SAA7134_BOARD_BEHOLD_401:
	case SAA7134_BOARD_BEHOLD_403:
	case SAA7134_BOARD_BEHOLD_403FM:
	case SAA7134_BOARD_BEHOLD_405:
	case SAA7134_BOARD_BEHOLD_405FM:
	case SAA7134_BOARD_BEHOLD_407:
	case SAA7134_BOARD_BEHOLD_407FM:
	case SAA7134_BOARD_BEHOLD_409:
	case SAA7134_BOARD_BEHOLD_505FM:
	case SAA7134_BOARD_BEHOLD_505RDS_MK5:
	case SAA7134_BOARD_BEHOLD_505RDS_MK3:
	case SAA7134_BOARD_BEHOLD_507_9FM:
	case SAA7134_BOARD_BEHOLD_507RDS_MK3:
	case SAA7134_BOARD_BEHOLD_507RDS_MK5:
	case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
	case SAA7134_BOARD_REAL_ANGEL_220:
	case SAA7134_BOARD_KWORLD_PLUS_TV_ANALOG:
	case SAA7134_BOARD_AVERMEDIA_GO_007_FM_PLUS:
	case SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM:
	case SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S:
		dev->has_remote = SAA7134_REMOTE_GPIO;
		break;
	case SAA7134_BOARD_FLYDVBS_LR300:
		saa_writeb(SAA7134_GPIO_GPMODE3, 0x80);
		saa_writeb(SAA7134_GPIO_GPSTATUS2, 0x40);
		dev->has_remote = SAA7134_REMOTE_GPIO;
		break;
	case SAA7134_BOARD_MD5044:
		printk("%s: seems there are two different versions of the MD5044\n"
		       "%s: (with the same ID) out there. If sound doesn't work for\n"
		       "%s: you try the audio_clock_override=0x200000 insmod option.\n",
		       dev->name,dev->name,dev->name);
		break;
	case SAA7134_BOARD_CINERGY400_CARDBUS:
		/* power-up tuner chip */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00040000, 0x00040000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00040000, 0x00000000);
		break;
	case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL:
		/* this turns the remote control chip off to work around a bug in it */
		saa_writeb(SAA7134_GPIO_GPMODE1, 0x80);
		saa_writeb(SAA7134_GPIO_GPSTATUS1, 0x80);
		break;
	case SAA7134_BOARD_MONSTERTV_MOBILE:
		/* power-up tuner chip */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00040000, 0x00040000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00040000, 0x00000004);
		break;
	case SAA7134_BOARD_FLYDVBT_DUO_CARDBUS:
		/* turn the fan on */
		saa_writeb(SAA7134_GPIO_GPMODE3, 0x08);
		saa_writeb(SAA7134_GPIO_GPSTATUS3, 0x06);
		break;
	case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
	case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08000000, 0x08000000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08000000, 0x00000000);
		break;
	case SAA7134_BOARD_AVERMEDIA_CARDBUS:
	case SAA7134_BOARD_AVERMEDIA_M115:
		/* power-down tuner chip */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0xffffffff, 0);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0xffffffff, 0);
		msleep(10);
		/* power-up tuner chip */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0xffffffff, 0xffffffff);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0xffffffff, 0xffffffff);
		msleep(10);
		break;
	case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
		/* power-down tuner chip */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08400000, 0x08400000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08400000, 0);
		msleep(10);
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x08400000, 0x08400000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x08400000, 0x08400000);
		msleep(10);
		dev->has_remote = SAA7134_REMOTE_I2C;
		break;
	case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
		saa7134_set_gpio(dev, 23, 0);
		msleep(10);
		saa7134_set_gpio(dev, 23, 1);
		dev->has_remote = SAA7134_REMOTE_I2C;
		break;
	case SAA7134_BOARD_AVERMEDIA_M103:
		saa7134_set_gpio(dev, 23, 0);
		msleep(10);
		saa7134_set_gpio(dev, 23, 1);
		break;
	case SAA7134_BOARD_AVERMEDIA_A16D:
		saa7134_set_gpio(dev, 21, 0);
		msleep(10);
		saa7134_set_gpio(dev, 21, 1);
		msleep(1);
		dev->has_remote = SAA7134_REMOTE_GPIO;
		break;
	case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
		/* power-down tuner chip */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x000A8004, 0x000A8004);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x000A8004, 0);
		msleep(10);
		/* power-up tuner chip */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x000A8004, 0x000A8004);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x000A8004, 0x000A8004);
		msleep(10);
		/* remote via GPIO */
		dev->has_remote = SAA7134_REMOTE_GPIO;
		break;
	case SAA7134_BOARD_RTD_VFG7350:
		/*
		 * Make sure Production Test Register at offset 0x1D1 is cleared
		 * to take chip out of test mode.  Clearing bit 4 (TST_EN_AOUT)
		 * prevents pin 105 from remaining low; keeping pin 105 low
		 * continually resets the SAA6752 chip.
		 */
		saa_writeb (SAA7134_PRODUCTION_TEST_MODE, 0x00);
		break;
	case SAA7134_BOARD_HAUPPAUGE_HVR1150:
	case SAA7134_BOARD_HAUPPAUGE_HVR1120:
		dev->has_remote = SAA7134_REMOTE_GPIO;
		/* GPIO 26 high for digital, low for analog */
		saa7134_set_gpio(dev, 26, 0);
		msleep(1);
		saa7134_set_gpio(dev, 22, 0);
		msleep(10);
		saa7134_set_gpio(dev, 22, 1);
		break;
	/* i2c remotes */
	case SAA7134_BOARD_PINNACLE_PCTV_110i:
	case SAA7134_BOARD_PINNACLE_PCTV_310i:
	case SAA7134_BOARD_UPMOST_PURPLE_TV:
	case SAA7134_BOARD_MSI_TVATANYWHERE_PLUS:
	case SAA7134_BOARD_HAUPPAUGE_HVR1110:
	case SAA7134_BOARD_BEHOLD_607FM_MK3:
	case SAA7134_BOARD_BEHOLD_607FM_MK5:
	case SAA7134_BOARD_BEHOLD_609FM_MK3:
	case SAA7134_BOARD_BEHOLD_609FM_MK5:
	case SAA7134_BOARD_BEHOLD_607RDS_MK3:
	case SAA7134_BOARD_BEHOLD_607RDS_MK5:
	case SAA7134_BOARD_BEHOLD_609RDS_MK3:
	case SAA7134_BOARD_BEHOLD_609RDS_MK5:
	case SAA7134_BOARD_BEHOLD_M6:
	case SAA7134_BOARD_BEHOLD_M63:
	case SAA7134_BOARD_BEHOLD_M6_EXTRA:
	case SAA7134_BOARD_BEHOLD_H6:
	case SAA7134_BOARD_BEHOLD_X7:
	case SAA7134_BOARD_BEHOLD_H7:
	case SAA7134_BOARD_BEHOLD_A7:
	case SAA7134_BOARD_KWORLD_PC150U:
		dev->has_remote = SAA7134_REMOTE_I2C;
		break;
	case SAA7134_BOARD_AVERMEDIA_A169_B:
		printk("%s: %s: dual saa713x broadcast decoders\n"
		       "%s: Sorry, none of the inputs to this chip are supported yet.\n"
		       "%s: Dual decoder functionality is disabled for now, use the other chip.\n",
		       dev->name,card(dev).name,dev->name,dev->name);
		break;
	case SAA7134_BOARD_AVERMEDIA_M102:
		/* enable tuner */
		dev->has_remote = SAA7134_REMOTE_GPIO;
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x8c040007, 0x8c040007);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0c0007cd, 0x0c0007cd);
		break;
	case SAA7134_BOARD_AVERMEDIA_A700_HYBRID:
	case SAA7134_BOARD_AVERMEDIA_A700_PRO:
		/* write windows gpio values */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x80040100, 0x80040100);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x80040100, 0x00040100);
		break;
	case SAA7134_BOARD_VIDEOMATE_S350:
		dev->has_remote = SAA7134_REMOTE_GPIO;
/* (continuation of saa7134_board_init1: VIDEOMATE_S350 GPIO setup) */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0000C000, 0x0000C000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0000C000, 0x0000C000);
		break;
	case SAA7134_BOARD_AVERMEDIA_M733A:
		/* pulse GPIO 1 high-low-high; presumably a tuner/IR reset --
		 * exact pin meaning not visible from this code */
		saa7134_set_gpio(dev, 1, 1);
		msleep(10);
		saa7134_set_gpio(dev, 1, 0);
		msleep(10);
		saa7134_set_gpio(dev, 1, 1);
		dev->has_remote = SAA7134_REMOTE_GPIO;
		break;
	case SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2:
		/* enable LGS-8G75 */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0e050000, 0x0c050000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0e050000, 0x0c050000);
		break;
	case SAA7134_BOARD_VIDEOMATE_T750:
		/* enable the analog tuner */
		saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00008000, 0x00008000);
		saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00008000, 0x00008000);
		break;
	}
	return 0;
}

/*
 * Push the board's tuner configuration down to the tuner subdevices:
 * an optional separate radio tuner, the main TV tuner, an optional
 * tda9887 IF demodulator config, and (for XC2028 tuners) the firmware
 * name and demod selection.
 */
static void saa7134_tuner_setup(struct saa7134_dev *dev)
{
	struct tuner_setup tun_setup;
	unsigned int mode_mask = T_RADIO | T_ANALOG_TV;

	memset(&tun_setup, 0, sizeof(tun_setup));
	tun_setup.tuner_callback = saa7134_tuner_callback;

	if (saa7134_boards[dev->board].radio_type != UNSET) {
		/* board has a dedicated radio tuner chip */
		tun_setup.type = saa7134_boards[dev->board].radio_type;
		tun_setup.addr = saa7134_boards[dev->board].radio_addr;
		tun_setup.mode_mask = T_RADIO;
		saa_call_all(dev, tuner, s_type_addr, &tun_setup);
		/* radio is served; don't hand it to the TV tuner too */
		mode_mask &= ~T_RADIO;
	}

	if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type != UNSET)) {
		tun_setup.type = dev->tuner_type;
		tun_setup.addr = dev->tuner_addr;
		tun_setup.config = saa7134_boards[dev->board].tuner_config;
		tun_setup.tuner_callback = saa7134_tuner_callback;
		tun_setup.mode_mask = mode_mask;

		saa_call_all(dev, tuner, s_type_addr, &tun_setup);
	}

	if (dev->tda9887_conf) {
		struct v4l2_priv_tun_config tda9887_cfg;

		tda9887_cfg.tuner = TUNER_TDA9887;
		tda9887_cfg.priv = &dev->tda9887_conf;

		saa_call_all(dev, tuner, s_config, &tda9887_cfg);
	}

	if (dev->tuner_type == TUNER_XC2028) {
		struct v4l2_priv_tun_config  xc2028_cfg;
		struct xc2028_ctrl           ctl;

		memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
		memset(&ctl, 0, sizeof(ctl));

		ctl.fname   = XC2028_DEFAULT_FIRMWARE;
		ctl.max_len = 64;

		switch (dev->board) {
		case SAA7134_BOARD_AVERMEDIA_A16D:
		case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
		case SAA7134_BOARD_AVERMEDIA_M103:
		case SAA7134_BOARD_AVERMEDIA_A700_HYBRID:
			ctl.demod = XC3028_FE_ZARLINK456;
			break;
		default:
			ctl.demod = XC3028_FE_OREN538;
			ctl.mts = 1;
		}

		xc2028_cfg.tuner = TUNER_XC2028;
		xc2028_cfg.priv  = &ctl;

		saa_call_all(dev, tuner, s_config, &xc2028_cfg);
	}
}

/* stuff which needs working i2c */

/*
 * Second-stage board init, run once the i2c bus works: probes board
 * eeproms to fix up mis-detected board/tuner types and performs
 * board-specific i2c initialization.  (Function continues beyond this
 * chunk.)
 */
int saa7134_board_init2(struct saa7134_dev *dev)
{
	unsigned char buf;
	int board;

	/* Put here the code that enables the chips that are needed
	   for analog mode and doesn't depend on the tuner attachment.
	   It is also a good idea to get tuner type from eeprom, etc before
	   initializing tuner, since we can avoid loading tuner driver
	   on devices that has TUNER_ABSENT
	 */
	switch (dev->board) {
	case SAA7134_BOARD_BMK_MPEX_NOTUNER:
	case SAA7134_BOARD_BMK_MPEX_TUNER:
		/* Checks if the device has a tuner at 0x60 addr
		   If the device doesn't have a tuner, TUNER_ABSENT
		   will be used at tuner_type, avoiding loading tuner
		   without needing it
		 */
		dev->i2c_client.addr = 0x60;
		board = (i2c_master_recv(&dev->i2c_client, &buf, 0) < 0)
			? SAA7134_BOARD_BMK_MPEX_NOTUNER
			: SAA7134_BOARD_BMK_MPEX_TUNER;
		if (board == dev->board)
			break;
		dev->board = board;
		printk("%s: board type fixup: %s\n", dev->name,
		       saa7134_boards[dev->board].name);
		dev->tuner_type = saa7134_boards[dev->board].tuner_type;
		break;
	case SAA7134_BOARD_MD7134:
	{
		u8 subaddr;
		u8 data[3];
		int ret, tuner_t;
		struct i2c_msg msg[] = {{.addr=0x50, .flags=0, .buf=&subaddr, .len = 1},
					{.addr=0x50, .flags=I2C_M_RD, .buf=data, .len = 3}};

		subaddr= 0x14;
		tuner_t = 0;

		/* Retrieve device data from eeprom, checking for the
		   proper tuner_type.
		 */
		ret = i2c_transfer(&dev->i2c_adap, msg, 2);
		if (ret != 2) {
			printk(KERN_ERR "EEPROM read failure\n");
		} else if ((data[0] != 0) && (data[0] != 0xff)) {
			/* old config structure */
			subaddr = data[0] + 2;
			msg[1].len = 2;
			i2c_transfer(&dev->i2c_adap, msg, 2);
			tuner_t = (data[0] << 8) + data[1];
			switch (tuner_t){
			case 0x0103:
				dev->tuner_type = TUNER_PHILIPS_PAL;
				break;
			case 0x010C:
				dev->tuner_type = TUNER_PHILIPS_FM1216ME_MK3;
				break;
			default:
				printk(KERN_ERR "%s Can't determine tuner type %x from EEPROM\n", dev->name, tuner_t);
			}
		} else if ((data[1] != 0) && (data[1] != 0xff)) {
			/* new config structure */
			subaddr = data[1] + 1;
			msg[1].len = 1;
			i2c_transfer(&dev->i2c_adap, msg, 2);
			subaddr = data[0] + 1;
			msg[1].len = 2;
			i2c_transfer(&dev->i2c_adap, msg, 2);
			tuner_t = (data[1] << 8) + data[0];
			switch (tuner_t) {
			case 0x0005:
				dev->tuner_type = TUNER_PHILIPS_FM1216ME_MK3;
				break;
			case 0x001d:
				dev->tuner_type = TUNER_PHILIPS_FMD1216ME_MK3;
				printk(KERN_INFO "%s Board has DVB-T\n", dev->name);
				break;
			default:
				printk(KERN_ERR "%s Can't determine tuner type %x from EEPROM\n", dev->name, tuner_t);
			}
		} else {
			printk(KERN_ERR "%s unexpected config structure\n", dev->name);
		}

		printk(KERN_INFO "%s Tuner type is %d\n", dev->name, dev->tuner_type);
		break;
	}
	case SAA7134_BOARD_PHILIPS_EUROPA:
		if (dev->autodetected && (dev->eedata[0x41] == 0x1c)) {
			/* Reconfigure board as Snake reference design */
			dev->board = SAA7134_BOARD_PHILIPS_SNAKE;
			dev->tuner_type = saa7134_boards[dev->board].tuner_type;
			printk(KERN_INFO "%s: Reconfigured board as %s\n",
				dev->name, saa7134_boards[dev->board].name);
			break;
		}
		/* break intentionally omitted */
	case SAA7134_BOARD_VIDEOMATE_DVBT_300:
	case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
	case SAA7134_BOARD_ASUS_EUROPA_HYBRID:
	case SAA7134_BOARD_TECHNOTREND_BUDGET_T3000:
	{
		/* The Philips EUROPA based hybrid boards have the tuner
		   connected through the channel decoder. We have to make it
		   transparent to find it
		 */
		u8 data[] = { 0x07, 0x02};
		struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
		i2c_transfer(&dev->i2c_adap, &msg, 1);
		break;
	}
	case SAA7134_BOARD_PHILIPS_TIGER:
	case SAA7134_BOARD_PHILIPS_TIGER_S:
	{
		u8 data[] = { 0x3c, 0x33, 0x60};
		struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
		if (dev->autodetected && (dev->eedata[0x49] == 0x50)) {
			dev->board = SAA7134_BOARD_PHILIPS_TIGER_S;
			printk(KERN_INFO "%s: Reconfigured board as %s\n",
				dev->name, saa7134_boards[dev->board].name);
		}
		if (dev->board == SAA7134_BOARD_PHILIPS_TIGER_S) {
			dev->tuner_type = TUNER_PHILIPS_TDA8290;

			/* Tiger S uses a different third init byte */
			data[2] = 0x68;
			i2c_transfer(&dev->i2c_adap, &msg, 1);
			break;
		}
		i2c_transfer(&dev->i2c_adap, &msg, 1);
		break;
	}
	case SAA7134_BOARD_ASUSTeK_TVFM7135:
	/* The card below is detected as card=53, but is different */
		if (dev->autodetected && (dev->eedata[0x27] == 0x03)) {
			dev->board = SAA7134_BOARD_ASUSTeK_P7131_ANALOG;
			printk(KERN_INFO "%s: P7131 analog only, using "
			       "entry of %s\n",
			       dev->name, saa7134_boards[dev->board].name);

			/* IR init has already happened for other cards, so
			 * we have to catch up. */
			dev->has_remote = SAA7134_REMOTE_GPIO;
			saa7134_input_init1(dev);
		}
		break;
	case SAA7134_BOARD_HAUPPAUGE_HVR1150:
	case SAA7134_BOARD_HAUPPAUGE_HVR1120:
		hauppauge_eeprom(dev, dev->eedata+0x80);
		break;
	case SAA7134_BOARD_HAUPPAUGE_HVR1110:
		hauppauge_eeprom(dev, dev->eedata+0x80);
		/* break intentionally omitted */
	case SAA7134_BOARD_PINNACLE_PCTV_310i:
	case SAA7134_BOARD_KWORLD_DVBT_210:
	case SAA7134_BOARD_TEVION_DVBT_220RF:
	case SAA7134_BOARD_ASUSTeK_TIGER:
	case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
	case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
	case SAA7134_BOARD_MEDION_MD8800_QUADRO:
	case SAA7134_BOARD_AVERMEDIA_SUPER_007:
	case SAA7134_BOARD_TWINHAN_DTV_DVB_3056:
	case SAA7134_BOARD_CREATIX_CTX953:
	{
		/* this is a hybrid board, initialize to analog mode
		 * and configure firmware eeprom address
		 */
		u8 data[] = { 0x3c, 0x33, 0x60};
		struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
		i2c_transfer(&dev->i2c_adap, &msg, 1);
		break;
	}
	case SAA7134_BOARD_ASUSTeK_TIGER_3IN1:
	{
		/* same init sequence, but the demod sits at i2c 0x0b here */
		u8 data[] = { 0x3c, 0x33, 0x60};
		struct i2c_msg msg = {.addr = 0x0b, .flags = 0, .buf = data,
							.len = sizeof(data)};
		i2c_transfer(&dev->i2c_adap, &msg, 1);
		break;
	}
	case SAA7134_BOARD_FLYDVB_TRIO:
	{
		u8 temp = 0;
		int rc;
		u8 data[] = { 0x3c, 0x33, 0x62};
		struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)};
		i2c_transfer(&dev->i2c_adap, &msg, 1);

		/*
		 * send wake-up message to pic16C505 chip
		 * @ LifeView FlyDVB Trio
		 */
		msg.buf = &temp;
		msg.addr = 0x0b;
		msg.len = 1;
		if (1 != i2c_transfer(&dev->i2c_adap, &msg, 1)) {
			printk(KERN_WARNING "%s: send wake up byte to pic16C505"
					"(IR chip) failed\n", dev->name);
		} else {
			/* a successful 1-byte read back means the IR chip
			 * is present */
			msg.flags = I2C_M_RD;
			rc = i2c_transfer(&dev->i2c_adap, &msg, 1);
			printk(KERN_INFO "%s: probe IR chip @ i2c 0x%02x: %s\n",
				   dev->name, msg.addr,
				   (1 == rc) ? "yes" : "no");
			if (rc == 1)
				dev->has_remote = SAA7134_REMOTE_I2C;
		}
		break;
	}
	case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
	case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
	{
		/* initialize analog mode  */
		u8 data[] = { 0x3c, 0x33, 0x6a};
		struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
		i2c_transfer(&dev->i2c_adap, &msg, 1);
		break;
	}
	case SAA7134_BOARD_CINERGY_HT_PCMCIA:
	case SAA7134_BOARD_CINERGY_HT_PCI:
	{
		/* initialize analog mode */
		u8 data[] = { 0x3c, 0x33, 0x68};
		struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
		i2c_transfer(&dev->i2c_adap, &msg, 1);
		break;
	}
	case SAA7134_BOARD_VIDEOMATE_DVBT_200:
	case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
		/* The T200 and the T200A share the same pci id.  Consequently,
		 * we are going to query eeprom to try to find out which one we
		 * are actually looking at. */

		/* Don't do this if the board was specifically selected with an
		 * insmod option or if we have the default configuration T200*/
		if (!dev->autodetected || (dev->eedata[0x41] == 0xd0))
			break;
		if (dev->eedata[0x41] == 0x02) {
			/* Reconfigure board  as T200A */
			dev->board = SAA7134_BOARD_VIDEOMATE_DVBT_200A;
			dev->tuner_type   = saa7134_boards[dev->board].tuner_type;
			dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf;
			printk(KERN_INFO "%s: Reconfigured board as %s\n",
				dev->name, saa7134_boards[dev->board].name);
		} else {
			printk(KERN_WARNING "%s: Unexpected tuner type info: %x in eeprom\n",
				dev->name, dev->eedata[0x41]);
			break;
		}
		break;
	case SAA7134_BOARD_ADS_INSTANT_HDTV_PCI:
	case SAA7134_BOARD_KWORLD_ATSC110:
	{
		struct i2c_msg msg = { .addr = 0x0a, .flags = 0 };
		int i;
		/* register/value pairs written to the tuner at i2c 0x0a */
		static u8 buffer[][2] = {
			{ 0x10, 0x12 },
			{ 0x13, 0x04 },
			{ 0x16, 0x00 },
			{ 0x14, 0x04 },
			{ 0x17, 0x00 },
		};

		for (i = 0; i < ARRAY_SIZE(buffer); i++) {
			msg.buf = &buffer[i][0];
			msg.len = ARRAY_SIZE(buffer[0]);
			if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
				printk(KERN_WARNING
				       "%s: Unable to enable tuner(%i).\n",
				       dev->name, i);
		}
		break;
	}
	case SAA7134_BOARD_BEHOLD_H6:
{ u8 data[] = { 0x09, 0x9f, 0x86, 0x11}; struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data)}; /* The tuner TUNER_PHILIPS_FMD1216MEX_MK3 after hardware */ /* start has disabled IF and enabled DVB-T. When saa7134 */ /* scan I2C devices it not detect IF tda9887 and can`t */ /* watch TV without software reboot. For solve this problem */ /* switch the tuner to analog TV mode manually. */ if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) printk(KERN_WARNING "%s: Unable to enable IF of the tuner.\n", dev->name); break; } case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG: saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000); saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000); saa7134_set_gpio(dev, 27, 0); break; } /* switch() */ /* initialize tuner */ if (TUNER_ABSENT != dev->tuner_type) { int has_demod = (dev->tda9887_conf & TDA9887_PRESENT); /* Note: radio tuner address is always filled in, so we do not need to probe for a radio tuner device. */ if (dev->radio_type != UNSET) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", dev->radio_addr, NULL); if (has_demod) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (dev->tuner_addr == ADDR_UNSET) { enum v4l2_i2c_tuner_type type = has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV; v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(type)); } else { v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", dev->tuner_addr, NULL); } } saa7134_tuner_setup(dev); switch (dev->board) { case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM: case SAA7134_BOARD_AVERMEDIA_CARDBUS_501: { struct v4l2_priv_tun_config tea5767_cfg; struct tea5767_ctrl ctl; dev->i2c_client.addr = 0xC0; /* set TEA5767(analog FM) defines */ memset(&ctl, 0, sizeof(ctl)); ctl.xtal_freq = TEA5767_HIGH_LO_13MHz; tea5767_cfg.tuner = TUNER_TEA5767; tea5767_cfg.priv = &ctl; saa_call_all(dev, tuner, s_config, &tea5767_cfg); break; } } /* switch() */ return 0; }
gpl-2.0
nobodyAtall/msm7x30-3.4.x-nAa
fs/xfs/xfs_dquot.c
4808
26701
/* * Copyright (c) 2000-2003 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_trans_space.h" #include "xfs_trans_priv.h" #include "xfs_qm.h" #include "xfs_trace.h" /* * Lock order: * * ip->i_lock * qi->qi_tree_lock * dquot->q_qlock (xfs_dqlock() and friends) * dquot->q_flush (xfs_dqflock() and friends) * qi->qi_lru_lock * * If two dquots need to be locked the order is user before group/project, * otherwise by the lowest id first, see xfs_dqlock2. 
*/ #ifdef DEBUG xfs_buftarg_t *xfs_dqerror_target; int xfs_do_dqerror; int xfs_dqreq_num; int xfs_dqerror_mod = 33; #endif struct kmem_zone *xfs_qm_dqtrxzone; static struct kmem_zone *xfs_qm_dqzone; static struct lock_class_key xfs_dquot_other_class; /* * This is called to free all the memory associated with a dquot */ void xfs_qm_dqdestroy( xfs_dquot_t *dqp) { ASSERT(list_empty(&dqp->q_lru)); mutex_destroy(&dqp->q_qlock); kmem_zone_free(xfs_qm_dqzone, dqp); XFS_STATS_DEC(xs_qm_dquot); } /* * If default limits are in force, push them into the dquot now. * We overwrite the dquot limits only if they are zero and this * is not the root dquot. */ void xfs_qm_adjust_dqlimits( xfs_mount_t *mp, xfs_disk_dquot_t *d) { xfs_quotainfo_t *q = mp->m_quotainfo; ASSERT(d->d_id); if (q->qi_bsoftlimit && !d->d_blk_softlimit) d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit); if (q->qi_bhardlimit && !d->d_blk_hardlimit) d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit); if (q->qi_isoftlimit && !d->d_ino_softlimit) d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit); if (q->qi_ihardlimit && !d->d_ino_hardlimit) d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit); if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit) d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit); if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit) d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit); } /* * Check the limits and timers of a dquot and start or reset timers * if necessary. * This gets called even when quota enforcement is OFF, which makes our * life a little less complicated. (We just don't reject any quota * reservations in that case, when enforcement is off). * We also return 0 as the values of the timers in Q_GETQUOTA calls, when * enforcement's off. * In contrast, warnings are a little different in that they don't * 'automatically' get started when limits get exceeded. They do * get reset to zero, however, when we find the count to be under * the soft limit (they are only ever set non-zero via userspace). 
*/ void xfs_qm_adjust_dqtimers( xfs_mount_t *mp, xfs_disk_dquot_t *d) { ASSERT(d->d_id); #ifdef DEBUG if (d->d_blk_hardlimit) ASSERT(be64_to_cpu(d->d_blk_softlimit) <= be64_to_cpu(d->d_blk_hardlimit)); if (d->d_ino_hardlimit) ASSERT(be64_to_cpu(d->d_ino_softlimit) <= be64_to_cpu(d->d_ino_hardlimit)); if (d->d_rtb_hardlimit) ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= be64_to_cpu(d->d_rtb_hardlimit)); #endif if (!d->d_btimer) { if ((d->d_blk_softlimit && (be64_to_cpu(d->d_bcount) > be64_to_cpu(d->d_blk_softlimit))) || (d->d_blk_hardlimit && (be64_to_cpu(d->d_bcount) > be64_to_cpu(d->d_blk_hardlimit)))) { d->d_btimer = cpu_to_be32(get_seconds() + mp->m_quotainfo->qi_btimelimit); } else { d->d_bwarns = 0; } } else { if ((!d->d_blk_softlimit || (be64_to_cpu(d->d_bcount) <= be64_to_cpu(d->d_blk_softlimit))) && (!d->d_blk_hardlimit || (be64_to_cpu(d->d_bcount) <= be64_to_cpu(d->d_blk_hardlimit)))) { d->d_btimer = 0; } } if (!d->d_itimer) { if ((d->d_ino_softlimit && (be64_to_cpu(d->d_icount) > be64_to_cpu(d->d_ino_softlimit))) || (d->d_ino_hardlimit && (be64_to_cpu(d->d_icount) > be64_to_cpu(d->d_ino_hardlimit)))) { d->d_itimer = cpu_to_be32(get_seconds() + mp->m_quotainfo->qi_itimelimit); } else { d->d_iwarns = 0; } } else { if ((!d->d_ino_softlimit || (be64_to_cpu(d->d_icount) <= be64_to_cpu(d->d_ino_softlimit))) && (!d->d_ino_hardlimit || (be64_to_cpu(d->d_icount) <= be64_to_cpu(d->d_ino_hardlimit)))) { d->d_itimer = 0; } } if (!d->d_rtbtimer) { if ((d->d_rtb_softlimit && (be64_to_cpu(d->d_rtbcount) > be64_to_cpu(d->d_rtb_softlimit))) || (d->d_rtb_hardlimit && (be64_to_cpu(d->d_rtbcount) > be64_to_cpu(d->d_rtb_hardlimit)))) { d->d_rtbtimer = cpu_to_be32(get_seconds() + mp->m_quotainfo->qi_rtbtimelimit); } else { d->d_rtbwarns = 0; } } else { if ((!d->d_rtb_softlimit || (be64_to_cpu(d->d_rtbcount) <= be64_to_cpu(d->d_rtb_softlimit))) && (!d->d_rtb_hardlimit || (be64_to_cpu(d->d_rtbcount) <= be64_to_cpu(d->d_rtb_hardlimit)))) { d->d_rtbtimer = 0; } } } /* * initialize a 
buffer full of dquots and log the whole thing */ STATIC void xfs_qm_init_dquot_blk( xfs_trans_t *tp, xfs_mount_t *mp, xfs_dqid_t id, uint type, xfs_buf_t *bp) { struct xfs_quotainfo *q = mp->m_quotainfo; xfs_dqblk_t *d; int curid, i; ASSERT(tp); ASSERT(xfs_buf_islocked(bp)); d = bp->b_addr; /* * ID of the first dquot in the block - id's are zero based. */ curid = id - (id % q->qi_dqperchunk); ASSERT(curid >= 0); memset(d, 0, BBTOB(q->qi_dqchunklen)); for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) { d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); d->dd_diskdq.d_version = XFS_DQUOT_VERSION; d->dd_diskdq.d_id = cpu_to_be32(curid); d->dd_diskdq.d_flags = type; } xfs_trans_dquot_buf(tp, bp, (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF : ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF : XFS_BLF_GDQUOT_BUF))); xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); } /* * Allocate a block and fill it with dquots. * This is called when the bmapi finds a hole. */ STATIC int xfs_qm_dqalloc( xfs_trans_t **tpp, xfs_mount_t *mp, xfs_dquot_t *dqp, xfs_inode_t *quotip, xfs_fileoff_t offset_fsb, xfs_buf_t **O_bpp) { xfs_fsblock_t firstblock; xfs_bmap_free_t flist; xfs_bmbt_irec_t map; int nmaps, error, committed; xfs_buf_t *bp; xfs_trans_t *tp = *tpp; ASSERT(tp != NULL); trace_xfs_dqalloc(dqp); /* * Initialize the bmap freelist prior to calling bmapi code. 
*/ xfs_bmap_init(&flist, &firstblock); xfs_ilock(quotip, XFS_ILOCK_EXCL); /* * Return if this type of quotas is turned off while we didn't * have an inode lock */ if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { xfs_iunlock(quotip, XFS_ILOCK_EXCL); return (ESRCH); } xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); nmaps = 1; error = xfs_bmapi_write(tp, quotip, offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, &firstblock, XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps, &flist); if (error) goto error0; ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); ASSERT(nmaps == 1); ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK)); /* * Keep track of the blkno to save a lookup later */ dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); /* now we can just get the buffer (there's nothing to read yet) */ bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno, mp->m_quotainfo->qi_dqchunklen, 0); error = xfs_buf_geterror(bp); if (error) goto error1; /* * Make a chunk of dquots out of this buffer and log * the entire thing. */ xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id), dqp->dq_flags & XFS_DQ_ALLTYPES, bp); /* * xfs_bmap_finish() may commit the current transaction and * start a second transaction if the freelist is not empty. * * Since we still want to modify this buffer, we need to * ensure that the buffer is not released on commit of * the first transaction and ensure the buffer is added to the * second transaction. * * If there is only one transaction then don't stop the buffer * from being released when it commits later on. */ xfs_trans_bhold(tp, bp); if ((error = xfs_bmap_finish(tpp, &flist, &committed))) { goto error1; } if (committed) { tp = *tpp; xfs_trans_bjoin(tp, bp); } else { xfs_trans_bhold_release(tp, bp); } *O_bpp = bp; return 0; error1: xfs_bmap_cancel(&flist); error0: xfs_iunlock(quotip, XFS_ILOCK_EXCL); return (error); } /* * Maps a dquot to the buffer containing its on-disk version. 
* This returns a ptr to the buffer containing the on-disk dquot * in the bpp param, and a ptr to the on-disk dquot within that buffer */ STATIC int xfs_qm_dqtobp( xfs_trans_t **tpp, xfs_dquot_t *dqp, xfs_disk_dquot_t **O_ddpp, xfs_buf_t **O_bpp, uint flags) { xfs_bmbt_irec_t map; int nmaps = 1, error; xfs_buf_t *bp; xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp); xfs_mount_t *mp = dqp->q_mount; xfs_disk_dquot_t *ddq; xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); xfs_trans_t *tp = (tpp ? *tpp : NULL); dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; xfs_ilock(quotip, XFS_ILOCK_SHARED); if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { /* * Return if this type of quotas is turned off while we * didn't have the quota inode lock. */ xfs_iunlock(quotip, XFS_ILOCK_SHARED); return ESRCH; } /* * Find the block map; no allocations yet */ error = xfs_bmapi_read(quotip, dqp->q_fileoffset, XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); xfs_iunlock(quotip, XFS_ILOCK_SHARED); if (error) return error; ASSERT(nmaps == 1); ASSERT(map.br_blockcount == 1); /* * Offset of dquot in the (fixed sized) dquot chunk. */ dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * sizeof(xfs_dqblk_t); ASSERT(map.br_startblock != DELAYSTARTBLOCK); if (map.br_startblock == HOLESTARTBLOCK) { /* * We don't allocate unless we're asked to */ if (!(flags & XFS_QMOPT_DQALLOC)) return ENOENT; ASSERT(tp); error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, dqp->q_fileoffset, &bp); if (error) return error; tp = *tpp; } else { trace_xfs_dqtobp_read(dqp); /* * store the blkno etc so that we don't have to do the * mapping all the time */ dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno, mp->m_quotainfo->qi_dqchunklen, 0, &bp); if (error || !bp) return XFS_ERROR(error); } ASSERT(xfs_buf_islocked(bp)); /* * calculate the location of the dquot inside the buffer. 
*/ ddq = bp->b_addr + dqp->q_bufoffset; /* * A simple sanity check in case we got a corrupted dquot... */ error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES, flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), "dqtobp"); if (error) { if (!(flags & XFS_QMOPT_DQREPAIR)) { xfs_trans_brelse(tp, bp); return XFS_ERROR(EIO); } } *O_bpp = bp; *O_ddpp = ddq; return (0); } /* * Read in the ondisk dquot using dqtobp() then copy it to an incore version, * and release the buffer immediately. * * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if it needed. */ int xfs_qm_dqread( struct xfs_mount *mp, xfs_dqid_t id, uint type, uint flags, struct xfs_dquot **O_dqpp) { struct xfs_dquot *dqp; struct xfs_disk_dquot *ddqp; struct xfs_buf *bp; struct xfs_trans *tp = NULL; int error; int cancelflags = 0; dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP); dqp->dq_flags = type; dqp->q_core.d_id = cpu_to_be32(id); dqp->q_mount = mp; INIT_LIST_HEAD(&dqp->q_lru); mutex_init(&dqp->q_qlock); init_waitqueue_head(&dqp->q_pinwait); /* * Because we want to use a counting completion, complete * the flush completion once to allow a single access to * the flush completion without blocking. */ init_completion(&dqp->q_flush); complete(&dqp->q_flush); /* * Make sure group quotas have a different lock class than user * quotas. */ if (!(type & XFS_DQ_USER)) lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); XFS_STATS_INC(xs_qm_dquot); trace_xfs_dqread(dqp); if (flags & XFS_QMOPT_DQALLOC) { tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC); error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp), XFS_WRITE_LOG_RES(mp) + /* * Round the chunklen up to the next multiple * of 128 (buf log item chunk size)). */ BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128, 0, XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT); if (error) goto error1; cancelflags = XFS_TRANS_RELEASE_LOG_RES; } /* * get a pointer to the on-disk dquot and the buffer containing it * dqp already knows its own type (GROUP/USER). 
*/ error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags); if (error) { /* * This can happen if quotas got turned off (ESRCH), * or if the dquot didn't exist on disk and we ask to * allocate (ENOENT). */ trace_xfs_dqread_fail(dqp); cancelflags |= XFS_TRANS_ABORT; goto error1; } /* copy everything from disk dquot to the incore dquot */ memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t)); xfs_qm_dquot_logitem_init(dqp); /* * Reservation counters are defined as reservation plus current usage * to avoid having to add every time. */ dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount); dqp->q_res_icount = be64_to_cpu(ddqp->d_icount); dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount); /* Mark the buf so that this will stay incore a little longer */ xfs_buf_set_ref(bp, XFS_DQUOT_REF); /* * We got the buffer with a xfs_trans_read_buf() (in dqtobp()) * So we need to release with xfs_trans_brelse(). * The strategy here is identical to that of inodes; we lock * the dquot in xfs_qm_dqget() before making it accessible to * others. This is because dquots, like inodes, need a good level of * concurrency, and we don't want to take locks on the entire buffers * for dquot accesses. * Note also that the dquot buffer may even be dirty at this point, if * this particular dquot was repaired. We still aren't afraid to * brelse it because we have the changes incore. */ ASSERT(xfs_buf_islocked(bp)); xfs_trans_brelse(tp, bp); if (tp) { error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); if (error) goto error0; } *O_dqpp = dqp; return error; error1: if (tp) xfs_trans_cancel(tp, cancelflags); error0: xfs_qm_dqdestroy(dqp); *O_dqpp = NULL; return error; } /* * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a * a locked dquot, doing an allocation (if requested) as needed. * When both an inode and an id are given, the inode's id takes precedence. 
* That is, if the id changes while we don't hold the ilock inside this * function, the new dquot is returned, not necessarily the one requested * in the id argument. */ int xfs_qm_dqget( xfs_mount_t *mp, xfs_inode_t *ip, /* locked inode (optional) */ xfs_dqid_t id, /* uid/projid/gid depending on type */ uint type, /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */ uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */ xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ { struct xfs_quotainfo *qi = mp->m_quotainfo; struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type); struct xfs_dquot *dqp; int error; ASSERT(XFS_IS_QUOTA_RUNNING(mp)); if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) || (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { return (ESRCH); } #ifdef DEBUG if (xfs_do_dqerror) { if ((xfs_dqerror_target == mp->m_ddev_targp) && (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { xfs_debug(mp, "Returning error in dqget"); return (EIO); } } ASSERT(type == XFS_DQ_USER || type == XFS_DQ_PROJ || type == XFS_DQ_GROUP); if (ip) { ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(xfs_inode_dquot(ip, type) == NULL); } #endif restart: mutex_lock(&qi->qi_tree_lock); dqp = radix_tree_lookup(tree, id); if (dqp) { xfs_dqlock(dqp); if (dqp->dq_flags & XFS_DQ_FREEING) { xfs_dqunlock(dqp); mutex_unlock(&qi->qi_tree_lock); trace_xfs_dqget_freeing(dqp); delay(1); goto restart; } dqp->q_nrefs++; mutex_unlock(&qi->qi_tree_lock); trace_xfs_dqget_hit(dqp); XFS_STATS_INC(xs_qm_dqcachehits); *O_dqpp = dqp; return 0; } mutex_unlock(&qi->qi_tree_lock); XFS_STATS_INC(xs_qm_dqcachemisses); /* * Dquot cache miss. We don't want to keep the inode lock across * a (potential) disk read. Also we don't want to deal with the lock * ordering between quotainode and this inode. OTOH, dropping the inode * lock here means dealing with a chown that can happen before * we re-acquire the lock. 
*/ if (ip) xfs_iunlock(ip, XFS_ILOCK_EXCL); error = xfs_qm_dqread(mp, id, type, flags, &dqp); if (ip) xfs_ilock(ip, XFS_ILOCK_EXCL); if (error) return error; if (ip) { /* * A dquot could be attached to this inode by now, since * we had dropped the ilock. */ if (xfs_this_quota_on(mp, type)) { struct xfs_dquot *dqp1; dqp1 = xfs_inode_dquot(ip, type); if (dqp1) { xfs_qm_dqdestroy(dqp); dqp = dqp1; xfs_dqlock(dqp); goto dqret; } } else { /* inode stays locked on return */ xfs_qm_dqdestroy(dqp); return XFS_ERROR(ESRCH); } } mutex_lock(&qi->qi_tree_lock); error = -radix_tree_insert(tree, id, dqp); if (unlikely(error)) { WARN_ON(error != EEXIST); /* * Duplicate found. Just throw away the new dquot and start * over. */ mutex_unlock(&qi->qi_tree_lock); trace_xfs_dqget_dup(dqp); xfs_qm_dqdestroy(dqp); XFS_STATS_INC(xs_qm_dquot_dups); goto restart; } /* * We return a locked dquot to the caller, with a reference taken */ xfs_dqlock(dqp); dqp->q_nrefs = 1; qi->qi_dquots++; mutex_unlock(&qi->qi_tree_lock); dqret: ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); trace_xfs_dqget_miss(dqp); *O_dqpp = dqp; return (0); } STATIC void xfs_qm_dqput_final( struct xfs_dquot *dqp) { struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo; struct xfs_dquot *gdqp; trace_xfs_dqput_free(dqp); mutex_lock(&qi->qi_lru_lock); if (list_empty(&dqp->q_lru)) { list_add_tail(&dqp->q_lru, &qi->qi_lru_list); qi->qi_lru_count++; XFS_STATS_INC(xs_qm_dquot_unused); } mutex_unlock(&qi->qi_lru_lock); /* * If we just added a udquot to the freelist, then we want to release * the gdquot reference that it (probably) has. Otherwise it'll keep * the gdquot from getting reclaimed. */ gdqp = dqp->q_gdquot; if (gdqp) { xfs_dqlock(gdqp); dqp->q_gdquot = NULL; } xfs_dqunlock(dqp); /* * If we had a group quota hint, release it now. */ if (gdqp) xfs_qm_dqput(gdqp); } /* * Release a reference to the dquot (decrement ref-count) and unlock it. 
* * If there is a group quota attached to this dquot, carefully release that * too without tripping over deadlocks'n'stuff. */ void xfs_qm_dqput( struct xfs_dquot *dqp) { ASSERT(dqp->q_nrefs > 0); ASSERT(XFS_DQ_IS_LOCKED(dqp)); trace_xfs_dqput(dqp); if (--dqp->q_nrefs > 0) xfs_dqunlock(dqp); else xfs_qm_dqput_final(dqp); } /* * Release a dquot. Flush it if dirty, then dqput() it. * dquot must not be locked. */ void xfs_qm_dqrele( xfs_dquot_t *dqp) { if (!dqp) return; trace_xfs_dqrele(dqp); xfs_dqlock(dqp); /* * We don't care to flush it if the dquot is dirty here. * That will create stutters that we want to avoid. * Instead we do a delayed write when we try to reclaim * a dirty dquot. Also xfs_sync will take part of the burden... */ xfs_qm_dqput(dqp); } /* * This is the dquot flushing I/O completion routine. It is called * from interrupt level when the buffer containing the dquot is * flushed to disk. It is responsible for removing the dquot logitem * from the AIL if it has not been re-logged, and unlocking the dquot's * flush lock. This behavior is very similar to that of inodes.. */ STATIC void xfs_qm_dqflush_done( struct xfs_buf *bp, struct xfs_log_item *lip) { xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip; xfs_dquot_t *dqp = qip->qli_dquot; struct xfs_ail *ailp = lip->li_ailp; /* * We only want to pull the item from the AIL if its * location in the log has not changed since we started the flush. * Thus, we only bother if the dquot's lsn has * not changed. First we check the lsn outside the lock * since it's cheaper, and then we recheck while * holding the lock before removing the dquot from the AIL. */ if ((lip->li_flags & XFS_LI_IN_AIL) && lip->li_lsn == qip->qli_flush_lsn) { /* xfs_trans_ail_delete() drops the AIL lock. */ spin_lock(&ailp->xa_lock); if (lip->li_lsn == qip->qli_flush_lsn) xfs_trans_ail_delete(ailp, lip); else spin_unlock(&ailp->xa_lock); } /* * Release the dq's flush lock since we're done with it. 
*/ xfs_dqfunlock(dqp); } /* * Write a modified dquot to disk. * The dquot must be locked and the flush lock too taken by caller. * The flush lock will not be unlocked until the dquot reaches the disk, * but the dquot is free to be unlocked and modified by the caller * in the interim. Dquot is still locked on return. This behavior is * identical to that of inodes. */ int xfs_qm_dqflush( xfs_dquot_t *dqp, uint flags) { struct xfs_mount *mp = dqp->q_mount; struct xfs_buf *bp; struct xfs_disk_dquot *ddqp; int error; ASSERT(XFS_DQ_IS_LOCKED(dqp)); ASSERT(!completion_done(&dqp->q_flush)); trace_xfs_dqflush(dqp); /* * If not dirty, or it's pinned and we are not supposed to block, nada. */ if (!XFS_DQ_IS_DIRTY(dqp) || ((flags & SYNC_TRYLOCK) && atomic_read(&dqp->q_pincount) > 0)) { xfs_dqfunlock(dqp); return 0; } xfs_qm_dqunpin_wait(dqp); /* * This may have been unpinned because the filesystem is shutting * down forcibly. If that's the case we must not write this dquot * to disk, because the log record didn't make it to disk! */ if (XFS_FORCED_SHUTDOWN(mp)) { dqp->dq_flags &= ~XFS_DQ_DIRTY; xfs_dqfunlock(dqp); return XFS_ERROR(EIO); } /* * Get the buffer containing the on-disk dquot */ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, mp->m_quotainfo->qi_dqchunklen, 0, &bp); if (error) { ASSERT(error != ENOENT); xfs_dqfunlock(dqp); return error; } /* * Calculate the location of the dquot inside the buffer. */ ddqp = bp->b_addr + dqp->q_bufoffset; /* * A simple sanity check in case we got a corrupted dquot.. */ error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0, XFS_QMOPT_DOWARN, "dqflush (incore copy)"); if (error) { xfs_buf_relse(bp); xfs_dqfunlock(dqp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); return XFS_ERROR(EIO); } /* This is the only portion of data that needs to persist */ memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t)); /* * Clear the dirty field and remember the flush lsn for later use. 
*/ dqp->dq_flags &= ~XFS_DQ_DIRTY; xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, &dqp->q_logitem.qli_item.li_lsn); /* * Attach an iodone routine so that we can remove this dquot from the * AIL and release the flush lock once the dquot is synced to disk. */ xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, &dqp->q_logitem.qli_item); /* * If the buffer is pinned then push on the log so we won't * get stuck waiting in the write for too long. */ if (xfs_buf_ispinned(bp)) { trace_xfs_dqflush_force(dqp); xfs_log_force(mp, 0); } if (flags & SYNC_WAIT) error = xfs_bwrite(bp); else xfs_buf_delwri_queue(bp); xfs_buf_relse(bp); trace_xfs_dqflush_done(dqp); /* * dqp is still locked, but caller is free to unlock it now. */ return error; } /* * Lock two xfs_dquot structures. * * To avoid deadlocks we always lock the quota structure with * the lowerd id first. */ void xfs_dqlock2( xfs_dquot_t *d1, xfs_dquot_t *d2) { if (d1 && d2) { ASSERT(d1 != d2); if (be32_to_cpu(d1->q_core.d_id) > be32_to_cpu(d2->q_core.d_id)) { mutex_lock(&d2->q_qlock); mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); } else { mutex_lock(&d1->q_qlock); mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED); } } else if (d1) { mutex_lock(&d1->q_qlock); } else if (d2) { mutex_lock(&d2->q_qlock); } } /* * Give the buffer a little push if it is incore and * wait on the flush lock. */ void xfs_dqflock_pushbuf_wait( xfs_dquot_t *dqp) { xfs_mount_t *mp = dqp->q_mount; xfs_buf_t *bp; /* * Check to see if the dquot has been flushed delayed * write. If so, grab its buffer and send it * out immediately. We'll be able to acquire * the flush lock when the I/O completes. 
*/ bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno, mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); if (!bp) goto out_lock; if (XFS_BUF_ISDELAYWRITE(bp)) { if (xfs_buf_ispinned(bp)) xfs_log_force(mp, 0); xfs_buf_delwri_promote(bp); wake_up_process(bp->b_target->bt_task); } xfs_buf_relse(bp); out_lock: xfs_dqflock(dqp); } int __init xfs_qm_init(void) { xfs_qm_dqzone = kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot"); if (!xfs_qm_dqzone) goto out; xfs_qm_dqtrxzone = kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx"); if (!xfs_qm_dqtrxzone) goto out_free_dqzone; return 0; out_free_dqzone: kmem_zone_destroy(xfs_qm_dqzone); out: return -ENOMEM; } void xfs_qm_exit(void) { kmem_zone_destroy(xfs_qm_dqtrxzone); kmem_zone_destroy(xfs_qm_dqzone); }
gpl-2.0
sony-msm8960/android_kernel_sony_apq8064
drivers/i2c/busses/i2c-ibm_iic.c
5064
19815
/* * drivers/i2c/busses/i2c-ibm_iic.c * * Support for the IIC peripheral on IBM PPC 4xx * * Copyright (c) 2003, 2004 Zultys Technologies. * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * * Copyright (c) 2008 PIKA Technologies * Sean MacLennan <smaclennan@pikatech.com> * * Based on original work by * Ian DaSilva <idasilva@mvista.com> * Armin Kuster <akuster@mvista.com> * Matt Porter <mporter@mvista.com> * * Copyright 2000-2003 MontaVista Software Inc. * * Original driver version was highly leveraged from i2c-elektor.c * * Copyright 1995-97 Simon G. Vogl * 1998-99 Hans Berglund * * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> * and even Frodo Looijaard <frodol@dds.nl> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/irq.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/of_platform.h> #include <linux/of_i2c.h> #include "i2c-ibm_iic.h" #define DRIVER_VERSION "2.2" MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION); MODULE_LICENSE("GPL"); static bool iic_force_poll; module_param(iic_force_poll, bool, 0); MODULE_PARM_DESC(iic_force_poll, "Force polling mode"); static bool iic_force_fast; module_param(iic_force_fast, bool, 0); MODULE_PARM_DESC(iic_force_fast, "Force fast mode (400 kHz)"); #define DBG_LEVEL 0 #ifdef DBG #undef DBG #endif #ifdef DBG2 #undef DBG2 #endif #if DBG_LEVEL > 0 # define DBG(f,x...) printk(KERN_DEBUG "ibm-iic" f, ##x) #else # define DBG(f,x...) ((void)0) #endif #if DBG_LEVEL > 1 # define DBG2(f,x...) DBG(f, ##x) #else # define DBG2(f,x...) 
((void)0) #endif #if DBG_LEVEL > 2 static void dump_iic_regs(const char* header, struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; printk(KERN_DEBUG "ibm-iic%d: %s\n", dev->idx, header); printk(KERN_DEBUG " cntl = 0x%02x, mdcntl = 0x%02x\n" " sts = 0x%02x, extsts = 0x%02x\n" " clkdiv = 0x%02x, xfrcnt = 0x%02x\n" " xtcntlss = 0x%02x, directcntl = 0x%02x\n", in_8(&iic->cntl), in_8(&iic->mdcntl), in_8(&iic->sts), in_8(&iic->extsts), in_8(&iic->clkdiv), in_8(&iic->xfrcnt), in_8(&iic->xtcntlss), in_8(&iic->directcntl)); } # define DUMP_REGS(h,dev) dump_iic_regs((h),(dev)) #else # define DUMP_REGS(h,dev) ((void)0) #endif /* Bus timings (in ns) for bit-banging */ static struct i2c_timings { unsigned int hd_sta; unsigned int su_sto; unsigned int low; unsigned int high; unsigned int buf; } timings [] = { /* Standard mode (100 KHz) */ { .hd_sta = 4000, .su_sto = 4000, .low = 4700, .high = 4000, .buf = 4700, }, /* Fast mode (400 KHz) */ { .hd_sta = 600, .su_sto = 600, .low = 1300, .high = 600, .buf = 1300, }}; /* Enable/disable interrupt generation */ static inline void iic_interrupt_mode(struct ibm_iic_private* dev, int enable) { out_8(&dev->vaddr->intmsk, enable ? INTRMSK_EIMTC : 0); } /* * Initialize IIC interface. 
*/ static void iic_dev_init(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; DBG("%d: init\n", dev->idx); /* Clear master address */ out_8(&iic->lmadr, 0); out_8(&iic->hmadr, 0); /* Clear slave address */ out_8(&iic->lsadr, 0); out_8(&iic->hsadr, 0); /* Clear status & extended status */ out_8(&iic->sts, STS_SCMP | STS_IRQA); out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA); /* Set clock divider */ out_8(&iic->clkdiv, dev->clckdiv); /* Clear transfer count */ out_8(&iic->xfrcnt, 0); /* Clear extended control and status */ out_8(&iic->xtcntlss, XTCNTLSS_SRC | XTCNTLSS_SRS | XTCNTLSS_SWC | XTCNTLSS_SWS); /* Clear control register */ out_8(&iic->cntl, 0); /* Enable interrupts if possible */ iic_interrupt_mode(dev, dev->irq >= 0); /* Set mode control */ out_8(&iic->mdcntl, MDCNTL_FMDB | MDCNTL_EINT | MDCNTL_EUBS | (dev->fast_mode ? MDCNTL_FSM : 0)); DUMP_REGS("iic_init", dev); } /* * Reset IIC interface */ static void iic_dev_reset(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; int i; u8 dc; DBG("%d: soft reset\n", dev->idx); DUMP_REGS("reset", dev); /* Place chip in the reset state */ out_8(&iic->xtcntlss, XTCNTLSS_SRST); /* Check if bus is free */ dc = in_8(&iic->directcntl); if (!DIRCTNL_FREE(dc)){ DBG("%d: trying to regain bus control\n", dev->idx); /* Try to set bus free state */ out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); /* Wait until we regain bus control */ for (i = 0; i < 100; ++i){ dc = in_8(&iic->directcntl); if (DIRCTNL_FREE(dc)) break; /* Toggle SCL line */ dc ^= DIRCNTL_SCC; out_8(&iic->directcntl, dc); udelay(10); dc ^= DIRCNTL_SCC; out_8(&iic->directcntl, dc); /* be nice */ cond_resched(); } } /* Remove reset */ out_8(&iic->xtcntlss, 0); /* Reinitialize interface */ iic_dev_init(dev); } /* * Do 0-length transaction using bit-banging through IIC_DIRECTCNTL register. 
*/ /* Wait for SCL and/or SDA to be high */ static int iic_dc_wait(volatile struct iic_regs __iomem *iic, u8 mask) { unsigned long x = jiffies + HZ / 28 + 2; while ((in_8(&iic->directcntl) & mask) != mask){ if (unlikely(time_after(jiffies, x))) return -1; cond_resched(); } return 0; } static int iic_smbus_quick(struct ibm_iic_private* dev, const struct i2c_msg* p) { volatile struct iic_regs __iomem *iic = dev->vaddr; const struct i2c_timings* t = &timings[dev->fast_mode ? 1 : 0]; u8 mask, v, sda; int i, res; /* Only 7-bit addresses are supported */ if (unlikely(p->flags & I2C_M_TEN)){ DBG("%d: smbus_quick - 10 bit addresses are not supported\n", dev->idx); return -EINVAL; } DBG("%d: smbus_quick(0x%02x)\n", dev->idx, p->addr); /* Reset IIC interface */ out_8(&iic->xtcntlss, XTCNTLSS_SRST); /* Wait for bus to become free */ out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSDA | DIRCNTL_MSC))) goto err; ndelay(t->buf); /* START */ out_8(&iic->directcntl, DIRCNTL_SCC); sda = 0; ndelay(t->hd_sta); /* Send address */ v = (u8)((p->addr << 1) | ((p->flags & I2C_M_RD) ? 1 : 0)); for (i = 0, mask = 0x80; i < 8; ++i, mask >>= 1){ out_8(&iic->directcntl, sda); ndelay(t->low / 2); sda = (v & mask) ? DIRCNTL_SDAC : 0; out_8(&iic->directcntl, sda); ndelay(t->low / 2); out_8(&iic->directcntl, DIRCNTL_SCC | sda); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSC))) goto err; ndelay(t->high); } /* ACK */ out_8(&iic->directcntl, sda); ndelay(t->low / 2); out_8(&iic->directcntl, DIRCNTL_SDAC); ndelay(t->low / 2); out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSC))) goto err; res = (in_8(&iic->directcntl) & DIRCNTL_MSDA) ? 
-EREMOTEIO : 1; ndelay(t->high); /* STOP */ out_8(&iic->directcntl, 0); ndelay(t->low); out_8(&iic->directcntl, DIRCNTL_SCC); if (unlikely(iic_dc_wait(iic, DIRCNTL_MSC))) goto err; ndelay(t->su_sto); out_8(&iic->directcntl, DIRCNTL_SDAC | DIRCNTL_SCC); ndelay(t->buf); DBG("%d: smbus_quick -> %s\n", dev->idx, res ? "NACK" : "ACK"); out: /* Remove reset */ out_8(&iic->xtcntlss, 0); /* Reinitialize interface */ iic_dev_init(dev); return res; err: DBG("%d: smbus_quick - bus is stuck\n", dev->idx); res = -EREMOTEIO; goto out; } /* * IIC interrupt handler */ static irqreturn_t iic_handler(int irq, void *dev_id) { struct ibm_iic_private* dev = (struct ibm_iic_private*)dev_id; volatile struct iic_regs __iomem *iic = dev->vaddr; DBG2("%d: irq handler, STS = 0x%02x, EXTSTS = 0x%02x\n", dev->idx, in_8(&iic->sts), in_8(&iic->extsts)); /* Acknowledge IRQ and wakeup iic_wait_for_tc */ out_8(&iic->sts, STS_IRQA | STS_SCMP); wake_up_interruptible(&dev->wq); return IRQ_HANDLED; } /* * Get master transfer result and clear errors if any. * Returns the number of actually transferred bytes or error (<0) */ static int iic_xfer_result(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; if (unlikely(in_8(&iic->sts) & STS_ERR)){ DBG("%d: xfer error, EXTSTS = 0x%02x\n", dev->idx, in_8(&iic->extsts)); /* Clear errors and possible pending IRQs */ out_8(&iic->extsts, EXTSTS_IRQP | EXTSTS_IRQD | EXTSTS_LA | EXTSTS_ICT | EXTSTS_XFRA); /* Flush master data buffer */ out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); /* Is bus free? * If error happened during combined xfer * IIC interface is usually stuck in some strange * state, the only way out - soft reset. */ if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ DBG("%d: bus is stuck, resetting\n", dev->idx); iic_dev_reset(dev); } return -EREMOTEIO; } else return in_8(&iic->xfrcnt) & XFRCNT_MTC_MASK; } /* * Try to abort active transfer. 
*/ static void iic_abort_xfer(struct ibm_iic_private* dev) { volatile struct iic_regs __iomem *iic = dev->vaddr; unsigned long x; DBG("%d: iic_abort_xfer\n", dev->idx); out_8(&iic->cntl, CNTL_HMT); /* * Wait for the abort command to complete. * It's not worth to be optimized, just poll (timeout >= 1 tick) */ x = jiffies + 2; while ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ if (time_after(jiffies, x)){ DBG("%d: abort timeout, resetting...\n", dev->idx); iic_dev_reset(dev); return; } schedule(); } /* Just to clear errors */ iic_xfer_result(dev); } /* * Wait for master transfer to complete. * It puts current process to sleep until we get interrupt or timeout expires. * Returns the number of transferred bytes or error (<0) */ static int iic_wait_for_tc(struct ibm_iic_private* dev){ volatile struct iic_regs __iomem *iic = dev->vaddr; int ret = 0; if (dev->irq >= 0){ /* Interrupt mode */ ret = wait_event_interruptible_timeout(dev->wq, !(in_8(&iic->sts) & STS_PT), dev->adap.timeout); if (unlikely(ret < 0)) DBG("%d: wait interrupted\n", dev->idx); else if (unlikely(in_8(&iic->sts) & STS_PT)){ DBG("%d: wait timeout\n", dev->idx); ret = -ETIMEDOUT; } } else { /* Polling mode */ unsigned long x = jiffies + dev->adap.timeout; while (in_8(&iic->sts) & STS_PT){ if (unlikely(time_after(jiffies, x))){ DBG("%d: poll timeout\n", dev->idx); ret = -ETIMEDOUT; break; } if (unlikely(signal_pending(current))){ DBG("%d: poll interrupted\n", dev->idx); ret = -ERESTARTSYS; break; } schedule(); } } if (unlikely(ret < 0)) iic_abort_xfer(dev); else ret = iic_xfer_result(dev); DBG2("%d: iic_wait_for_tc -> %d\n", dev->idx, ret); return ret; } /* * Low level master transfer routine */ static int iic_xfer_bytes(struct ibm_iic_private* dev, struct i2c_msg* pm, int combined_xfer) { volatile struct iic_regs __iomem *iic = dev->vaddr; char* buf = pm->buf; int i, j, loops, ret = 0; int len = pm->len; u8 cntl = (in_8(&iic->cntl) & CNTL_AMD) | CNTL_PT; if (pm->flags & I2C_M_RD) cntl |= 
CNTL_RW; loops = (len + 3) / 4; for (i = 0; i < loops; ++i, len -= 4){ int count = len > 4 ? 4 : len; u8 cmd = cntl | ((count - 1) << CNTL_TCT_SHIFT); if (!(cntl & CNTL_RW)) for (j = 0; j < count; ++j) out_8((void __iomem *)&iic->mdbuf, *buf++); if (i < loops - 1) cmd |= CNTL_CHT; else if (combined_xfer) cmd |= CNTL_RPST; DBG2("%d: xfer_bytes, %d, CNTL = 0x%02x\n", dev->idx, count, cmd); /* Start transfer */ out_8(&iic->cntl, cmd); /* Wait for completion */ ret = iic_wait_for_tc(dev); if (unlikely(ret < 0)) break; else if (unlikely(ret != count)){ DBG("%d: xfer_bytes, requested %d, transferred %d\n", dev->idx, count, ret); /* If it's not a last part of xfer, abort it */ if (combined_xfer || (i < loops - 1)) iic_abort_xfer(dev); ret = -EREMOTEIO; break; } if (cntl & CNTL_RW) for (j = 0; j < count; ++j) *buf++ = in_8((void __iomem *)&iic->mdbuf); } return ret > 0 ? 0 : ret; } /* * Set target slave address for master transfer */ static inline void iic_address(struct ibm_iic_private* dev, struct i2c_msg* msg) { volatile struct iic_regs __iomem *iic = dev->vaddr; u16 addr = msg->addr; DBG2("%d: iic_address, 0x%03x (%d-bit)\n", dev->idx, addr, msg->flags & I2C_M_TEN ? 10 : 7); if (msg->flags & I2C_M_TEN){ out_8(&iic->cntl, CNTL_AMD); out_8(&iic->lmadr, addr); out_8(&iic->hmadr, 0xf0 | ((addr >> 7) & 0x06)); } else { out_8(&iic->cntl, 0); out_8(&iic->lmadr, addr << 1); } } static inline int iic_invalid_address(const struct i2c_msg* p) { return (p->addr > 0x3ff) || (!(p->flags & I2C_M_TEN) && (p->addr > 0x7f)); } static inline int iic_address_neq(const struct i2c_msg* p1, const struct i2c_msg* p2) { return (p1->addr != p2->addr) || ((p1->flags & I2C_M_TEN) != (p2->flags & I2C_M_TEN)); } /* * Generic master transfer entrypoint. 
* Returns the number of processed messages or error (<0) */ static int iic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct ibm_iic_private* dev = (struct ibm_iic_private*)(i2c_get_adapdata(adap)); volatile struct iic_regs __iomem *iic = dev->vaddr; int i, ret = 0; DBG2("%d: iic_xfer, %d msg(s)\n", dev->idx, num); if (!num) return 0; /* Check the sanity of the passed messages. * Uhh, generic i2c layer is more suitable place for such code... */ if (unlikely(iic_invalid_address(&msgs[0]))){ DBG("%d: invalid address 0x%03x (%d-bit)\n", dev->idx, msgs[0].addr, msgs[0].flags & I2C_M_TEN ? 10 : 7); return -EINVAL; } for (i = 0; i < num; ++i){ if (unlikely(msgs[i].len <= 0)){ if (num == 1 && !msgs[0].len){ /* Special case for I2C_SMBUS_QUICK emulation. * IBM IIC doesn't support 0-length transactions * so we have to emulate them using bit-banging. */ return iic_smbus_quick(dev, &msgs[0]); } DBG("%d: invalid len %d in msg[%d]\n", dev->idx, msgs[i].len, i); return -EINVAL; } if (unlikely(iic_address_neq(&msgs[0], &msgs[i]))){ DBG("%d: invalid addr in msg[%d]\n", dev->idx, i); return -EINVAL; } } /* Check bus state */ if (unlikely((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE)){ DBG("%d: iic_xfer, bus is not free\n", dev->idx); /* Usually it means something serious has happened. * We *cannot* have unfinished previous transfer * so it doesn't make any sense to try to stop it. * Probably we were not able to recover from the * previous error. * The only *reasonable* thing I can think of here * is soft reset. 
--ebs */ iic_dev_reset(dev); if ((in_8(&iic->extsts) & EXTSTS_BCS_MASK) != EXTSTS_BCS_FREE){ DBG("%d: iic_xfer, bus is still not free\n", dev->idx); return -EREMOTEIO; } } else { /* Flush master data buffer (just in case) */ out_8(&iic->mdcntl, in_8(&iic->mdcntl) | MDCNTL_FMDB); } /* Load slave address */ iic_address(dev, &msgs[0]); /* Do real transfer */ for (i = 0; i < num && !ret; ++i) ret = iic_xfer_bytes(dev, &msgs[i], i < num - 1); return ret < 0 ? ret : num; } static u32 iic_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; } static const struct i2c_algorithm iic_algo = { .master_xfer = iic_xfer, .functionality = iic_func }; /* * Calculates IICx_CLCKDIV value for a specific OPB clock frequency */ static inline u8 iic_clckdiv(unsigned int opb) { /* Compatibility kludge, should go away after all cards * are fixed to fill correct value for opbfreq. * Previous driver version used hardcoded divider value 4, * it corresponds to OPB frequency from the range (40, 50] MHz */ if (!opb){ printk(KERN_WARNING "ibm-iic: using compatibility value for OPB freq," " fix your board specific setup\n"); opb = 50000000; } /* Convert to MHz */ opb /= 1000000; if (opb < 20 || opb > 150){ printk(KERN_WARNING "ibm-iic: invalid OPB clock frequency %u MHz\n", opb); opb = opb < 20 ? 20 : 150; } return (u8)((opb + 9) / 10 - 1); } static int __devinit iic_request_irq(struct platform_device *ofdev, struct ibm_iic_private *dev) { struct device_node *np = ofdev->dev.of_node; int irq; if (iic_force_poll) return 0; irq = irq_of_parse_and_map(np, 0); if (!irq) { dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n"); return 0; } /* Disable interrupts until we finish initialization, assumes * level-sensitive IRQ setup... 
*/ iic_interrupt_mode(dev, 0); if (request_irq(irq, iic_handler, 0, "IBM IIC", dev)) { dev_err(&ofdev->dev, "request_irq %d failed\n", irq); /* Fallback to the polling mode */ return 0; } return irq; } /* * Register single IIC interface */ static int __devinit iic_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct ibm_iic_private *dev; struct i2c_adapter *adap; const u32 *freq; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { dev_err(&ofdev->dev, "failed to allocate device data\n"); return -ENOMEM; } dev_set_drvdata(&ofdev->dev, dev); dev->vaddr = of_iomap(np, 0); if (dev->vaddr == NULL) { dev_err(&ofdev->dev, "failed to iomap device\n"); ret = -ENXIO; goto error_cleanup; } init_waitqueue_head(&dev->wq); dev->irq = iic_request_irq(ofdev, dev); if (!dev->irq) dev_warn(&ofdev->dev, "using polling mode\n"); /* Board specific settings */ if (iic_force_fast || of_get_property(np, "fast-mode", NULL)) dev->fast_mode = 1; freq = of_get_property(np, "clock-frequency", NULL); if (freq == NULL) { freq = of_get_property(np->parent, "clock-frequency", NULL); if (freq == NULL) { dev_err(&ofdev->dev, "Unable to get bus frequency\n"); ret = -EINVAL; goto error_cleanup; } } dev->clckdiv = iic_clckdiv(*freq); dev_dbg(&ofdev->dev, "clckdiv = %d\n", dev->clckdiv); /* Initialize IIC interface */ iic_dev_init(dev); /* Register it with i2c layer */ adap = &dev->adap; adap->dev.parent = &ofdev->dev; adap->dev.of_node = of_node_get(np); strlcpy(adap->name, "IBM IIC", sizeof(adap->name)); i2c_set_adapdata(adap, dev); adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->algo = &iic_algo; adap->timeout = HZ; ret = i2c_add_adapter(adap); if (ret < 0) { dev_err(&ofdev->dev, "failed to register i2c adapter\n"); goto error_cleanup; } dev_info(&ofdev->dev, "using %s mode\n", dev->fast_mode ? 
"fast (400 kHz)" : "standard (100 kHz)"); /* Now register all the child nodes */ of_i2c_register_devices(adap); return 0; error_cleanup: if (dev->irq) { iic_interrupt_mode(dev, 0); free_irq(dev->irq, dev); } if (dev->vaddr) iounmap(dev->vaddr); dev_set_drvdata(&ofdev->dev, NULL); kfree(dev); return ret; } /* * Cleanup initialized IIC interface */ static int __devexit iic_remove(struct platform_device *ofdev) { struct ibm_iic_private *dev = dev_get_drvdata(&ofdev->dev); dev_set_drvdata(&ofdev->dev, NULL); i2c_del_adapter(&dev->adap); if (dev->irq) { iic_interrupt_mode(dev, 0); free_irq(dev->irq, dev); } iounmap(dev->vaddr); kfree(dev); return 0; } static const struct of_device_id ibm_iic_match[] = { { .compatible = "ibm,iic", }, {} }; static struct platform_driver ibm_iic_driver = { .driver = { .name = "ibm-iic", .owner = THIS_MODULE, .of_match_table = ibm_iic_match, }, .probe = iic_probe, .remove = __devexit_p(iic_remove), }; module_platform_driver(ibm_iic_driver);
gpl-2.0
LeJay/android_kernel_samsung_I9505G
drivers/video/cg3.c
8136
11698
/* cg3.c: CGTHREE frame buffer driver * * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net) * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * * Driver layout based loosely on tgafb.c, see that file for credits. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/fbio.h> #include "sbuslib.h" /* * Local functions. */ static int cg3_setcolreg(unsigned, unsigned, unsigned, unsigned, unsigned, struct fb_info *); static int cg3_blank(int, struct fb_info *); static int cg3_mmap(struct fb_info *, struct vm_area_struct *); static int cg3_ioctl(struct fb_info *, unsigned int, unsigned long); /* * Frame buffer operations */ static struct fb_ops cg3_ops = { .owner = THIS_MODULE, .fb_setcolreg = cg3_setcolreg, .fb_blank = cg3_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_mmap = cg3_mmap, .fb_ioctl = cg3_ioctl, #ifdef CONFIG_COMPAT .fb_compat_ioctl = sbusfb_compat_ioctl, #endif }; /* Control Register Constants */ #define CG3_CR_ENABLE_INTS 0x80 #define CG3_CR_ENABLE_VIDEO 0x40 #define CG3_CR_ENABLE_TIMING 0x20 #define CG3_CR_ENABLE_CURCMP 0x10 #define CG3_CR_XTAL_MASK 0x0c #define CG3_CR_DIVISOR_MASK 0x03 /* Status Register Constants */ #define CG3_SR_PENDING_INT 0x80 #define CG3_SR_RES_MASK 0x70 #define CG3_SR_1152_900_76_A 0x40 #define CG3_SR_1152_900_76_B 0x60 #define CG3_SR_ID_MASK 0x0f #define CG3_SR_ID_COLOR 0x01 #define CG3_SR_ID_MONO 0x02 #define CG3_SR_ID_MONO_ECL 0x03 enum cg3_type { CG3_AT_66HZ = 0, CG3_AT_76HZ, CG3_RDI }; struct bt_regs { u32 addr; u32 color_map; u32 control; u32 cursor; }; struct cg3_regs { struct bt_regs cmap; u8 control; u8 status; u8 cursor_start; u8 
cursor_end; u8 h_blank_start; u8 h_blank_end; u8 h_sync_start; u8 h_sync_end; u8 comp_sync_end; u8 v_blank_start_high; u8 v_blank_start_low; u8 v_blank_end; u8 v_sync_start; u8 v_sync_end; u8 xfer_holdoff_start; u8 xfer_holdoff_end; }; /* Offset of interesting structures in the OBIO space */ #define CG3_REGS_OFFSET 0x400000UL #define CG3_RAM_OFFSET 0x800000UL struct cg3_par { spinlock_t lock; struct cg3_regs __iomem *regs; u32 sw_cmap[((256 * 3) + 3) / 4]; u32 flags; #define CG3_FLAG_BLANKED 0x00000001 #define CG3_FLAG_RDI 0x00000002 unsigned long which_io; }; /** * cg3_setcolreg - Optional function. Sets a color register. * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. * @info: frame buffer info structure * * The cg3 palette is loaded with 4 color values at each time * so you end up with: (rgb)(r), (gb)(rg), (b)(rgb), and so on. * We keep a sw copy of the hw cmap to assist us in this esoteric * loading procedure. */ static int cg3_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct cg3_par *par = (struct cg3_par *) info->par; struct bt_regs __iomem *bt = &par->regs->cmap; unsigned long flags; u32 *p32; u8 *p8; int count; if (regno >= 256) return 1; red >>= 8; green >>= 8; blue >>= 8; spin_lock_irqsave(&par->lock, flags); p8 = (u8 *)par->sw_cmap + (regno * 3); p8[0] = red; p8[1] = green; p8[2] = blue; #define D4M3(x) ((((x)>>2)<<1) + ((x)>>2)) /* (x/4)*3 */ #define D4M4(x) ((x)&~0x3) /* (x/4)*4 */ count = 3; p32 = &par->sw_cmap[D4M3(regno)]; sbus_writel(D4M4(regno), &bt->addr); while (count--) sbus_writel(*p32++, &bt->color_map); #undef D4M3 #undef D4M4 spin_unlock_irqrestore(&par->lock, flags); return 0; } /** * cg3_blank - Optional function. Blanks the display. 
* @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer */ static int cg3_blank(int blank, struct fb_info *info) { struct cg3_par *par = (struct cg3_par *) info->par; struct cg3_regs __iomem *regs = par->regs; unsigned long flags; u8 val; spin_lock_irqsave(&par->lock, flags); switch (blank) { case FB_BLANK_UNBLANK: /* Unblanking */ val = sbus_readb(&regs->control); val |= CG3_CR_ENABLE_VIDEO; sbus_writeb(val, &regs->control); par->flags &= ~CG3_FLAG_BLANKED; break; case FB_BLANK_NORMAL: /* Normal blanking */ case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ case FB_BLANK_POWERDOWN: /* Poweroff */ val = sbus_readb(&regs->control); val &= ~CG3_CR_ENABLE_VIDEO; sbus_writeb(val, &regs->control); par->flags |= CG3_FLAG_BLANKED; break; } spin_unlock_irqrestore(&par->lock, flags); return 0; } static struct sbus_mmap_map cg3_mmap_map[] = { { .voff = CG3_MMAP_OFFSET, .poff = CG3_RAM_OFFSET, .size = SBUS_MMAP_FBSIZE(1) }, { .size = 0 } }; static int cg3_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct cg3_par *par = (struct cg3_par *)info->par; return sbusfb_mmap_helper(cg3_mmap_map, info->fix.smem_start, info->fix.smem_len, par->which_io, vma); } static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { return sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_SUN3COLOR, 8, info->fix.smem_len); } /* * Initialisation */ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes, struct device_node *dp) { strlcpy(info->fix.id, dp->name, sizeof(info->fix.id)); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = linebytes; info->fix.accel = FB_ACCEL_SUN_CGTHREE; } static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, struct device_node *dp) { const char *params; char *p; int ww, hh; params = of_get_property(dp, "params", NULL); if (params) { 
ww = simple_strtoul(params, &p, 10); if (ww && *p == 'x') { hh = simple_strtoul(p + 1, &p, 10); if (hh && *p == '-') { if (var->xres != ww || var->yres != hh) { var->xres = var->xres_virtual = ww; var->yres = var->yres_virtual = hh; } } } } } static u8 cg3regvals_66hz[] __devinitdata = { /* 1152 x 900, 66 Hz */ 0x14, 0xbb, 0x15, 0x2b, 0x16, 0x04, 0x17, 0x14, 0x18, 0xae, 0x19, 0x03, 0x1a, 0xa8, 0x1b, 0x24, 0x1c, 0x01, 0x1d, 0x05, 0x1e, 0xff, 0x1f, 0x01, 0x10, 0x20, 0 }; static u8 cg3regvals_76hz[] __devinitdata = { /* 1152 x 900, 76 Hz */ 0x14, 0xb7, 0x15, 0x27, 0x16, 0x03, 0x17, 0x0f, 0x18, 0xae, 0x19, 0x03, 0x1a, 0xae, 0x1b, 0x2a, 0x1c, 0x01, 0x1d, 0x09, 0x1e, 0xff, 0x1f, 0x01, 0x10, 0x24, 0 }; static u8 cg3regvals_rdi[] __devinitdata = { /* 640 x 480, cgRDI */ 0x14, 0x70, 0x15, 0x20, 0x16, 0x08, 0x17, 0x10, 0x18, 0x06, 0x19, 0x02, 0x1a, 0x31, 0x1b, 0x51, 0x1c, 0x06, 0x1d, 0x0c, 0x1e, 0xff, 0x1f, 0x01, 0x10, 0x22, 0 }; static u8 *cg3_regvals[] __devinitdata = { cg3regvals_66hz, cg3regvals_76hz, cg3regvals_rdi }; static u_char cg3_dacvals[] __devinitdata = { 4, 0xff, 5, 0x00, 6, 0x70, 7, 0x00, 0 }; static int __devinit cg3_do_default_mode(struct cg3_par *par) { enum cg3_type type; u8 *p; if (par->flags & CG3_FLAG_RDI) type = CG3_RDI; else { u8 status = sbus_readb(&par->regs->status), mon; if ((status & CG3_SR_ID_MASK) == CG3_SR_ID_COLOR) { mon = status & CG3_SR_RES_MASK; if (mon == CG3_SR_1152_900_76_A || mon == CG3_SR_1152_900_76_B) type = CG3_AT_76HZ; else type = CG3_AT_66HZ; } else { printk(KERN_ERR "cgthree: can't handle SR %02x\n", status); return -EINVAL; } } for (p = cg3_regvals[type]; *p; p += 2) { u8 __iomem *regp = &((u8 __iomem *)par->regs)[p[0]]; sbus_writeb(p[1], regp); } for (p = cg3_dacvals; *p; p += 2) { u8 __iomem *regp; regp = (u8 __iomem *)&par->regs->cmap.addr; sbus_writeb(p[0], regp); regp = (u8 __iomem *)&par->regs->cmap.control; sbus_writeb(p[1], regp); } return 0; } static int __devinit cg3_probe(struct platform_device *op) { struct 
device_node *dp = op->dev.of_node; struct fb_info *info; struct cg3_par *par; int linebytes, err; info = framebuffer_alloc(sizeof(struct cg3_par), &op->dev); err = -ENOMEM; if (!info) goto out_err; par = info->par; spin_lock_init(&par->lock); info->fix.smem_start = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; if (!strcmp(dp->name, "cgRDI")) par->flags |= CG3_FLAG_RDI; if (par->flags & CG3_FLAG_RDI) cg3_rdi_maybe_fixup_var(&info->var, dp); linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); par->regs = of_ioremap(&op->resource[0], CG3_REGS_OFFSET, sizeof(struct cg3_regs), "cg3 regs"); if (!par->regs) goto out_release_fb; info->flags = FBINFO_DEFAULT; info->fbops = &cg3_ops; info->screen_base = of_ioremap(&op->resource[0], CG3_RAM_OFFSET, info->fix.smem_len, "cg3 ram"); if (!info->screen_base) goto out_unmap_regs; cg3_blank(FB_BLANK_UNBLANK, info); if (!of_find_property(dp, "width", NULL)) { err = cg3_do_default_mode(par); if (err) goto out_unmap_screen; } if (fb_alloc_cmap(&info->cmap, 256, 0)) goto out_unmap_screen; fb_set_cmap(&info->cmap, info); cg3_init_fix(info, linebytes, dp); err = register_framebuffer(info); if (err < 0) goto out_dealloc_cmap; dev_set_drvdata(&op->dev, info); printk(KERN_INFO "%s: cg3 at %lx:%lx\n", dp->full_name, par->which_io, info->fix.smem_start); return 0; out_dealloc_cmap: fb_dealloc_cmap(&info->cmap); out_unmap_screen: of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); out_unmap_regs: of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs)); out_release_fb: framebuffer_release(info); out_err: return err; } static int __devexit cg3_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct cg3_par *par = info->par; unregister_framebuffer(info); 
fb_dealloc_cmap(&info->cmap); of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs)); of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); framebuffer_release(info); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id cg3_match[] = { { .name = "cgthree", }, { .name = "cgRDI", }, {}, }; MODULE_DEVICE_TABLE(of, cg3_match); static struct platform_driver cg3_driver = { .driver = { .name = "cg3", .owner = THIS_MODULE, .of_match_table = cg3_match, }, .probe = cg3_probe, .remove = __devexit_p(cg3_remove), }; static int __init cg3_init(void) { if (fb_get_options("cg3fb", NULL)) return -ENODEV; return platform_driver_register(&cg3_driver); } static void __exit cg3_exit(void) { platform_driver_unregister(&cg3_driver); } module_init(cg3_init); module_exit(cg3_exit); MODULE_DESCRIPTION("framebuffer driver for CGthree chipsets"); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
pugs/vfio-linux-2.6
security/apparmor/context.c
10696
5518
/* * AppArmor security module * * This file contains AppArmor functions used to manipulate object security * contexts. * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. * * * AppArmor sets confinement on every task, via the the aa_task_cxt and * the aa_task_cxt.profile, both of which are required and are not allowed * to be NULL. The aa_task_cxt is not reference counted and is unique * to each cred (which is reference count). The profile pointed to by * the task_cxt is reference counted. * * TODO * If a task uses change_hat it currently does not return to the old * cred or task context but instead creates a new one. Ideally the task * should return to the previous cred if it has not been modified. * */ #include "include/context.h" #include "include/policy.h" /** * aa_alloc_task_context - allocate a new task_cxt * @flags: gfp flags for allocation * * Returns: allocated buffer or NULL on failure */ struct aa_task_cxt *aa_alloc_task_context(gfp_t flags) { return kzalloc(sizeof(struct aa_task_cxt), flags); } /** * aa_free_task_context - free a task_cxt * @cxt: task_cxt to free (MAYBE NULL) */ void aa_free_task_context(struct aa_task_cxt *cxt) { if (cxt) { aa_put_profile(cxt->profile); aa_put_profile(cxt->previous); aa_put_profile(cxt->onexec); kzfree(cxt); } } /** * aa_dup_task_context - duplicate a task context, incrementing reference counts * @new: a blank task context (NOT NULL) * @old: the task context to copy (NOT NULL) */ void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old) { *new = *old; aa_get_profile(new->profile); aa_get_profile(new->previous); aa_get_profile(new->onexec); } /** * aa_replace_current_profile - replace the current tasks profiles * @profile: new profile (NOT NULL) * * Returns: 0 or error on 
failure */ int aa_replace_current_profile(struct aa_profile *profile) { struct aa_task_cxt *cxt = current_cred()->security; struct cred *new; BUG_ON(!profile); if (cxt->profile == profile) return 0; new = prepare_creds(); if (!new) return -ENOMEM; cxt = new->security; if (unconfined(profile) || (cxt->profile->ns != profile->ns)) { /* if switching to unconfined or a different profile namespace * clear out context state */ aa_put_profile(cxt->previous); aa_put_profile(cxt->onexec); cxt->previous = NULL; cxt->onexec = NULL; cxt->token = 0; } /* be careful switching cxt->profile, when racing replacement it * is possible that cxt->profile->replacedby is the reference keeping * @profile valid, so make sure to get its reference before dropping * the reference on cxt->profile */ aa_get_profile(profile); aa_put_profile(cxt->profile); cxt->profile = profile; commit_creds(new); return 0; } /** * aa_set_current_onexec - set the tasks change_profile to happen onexec * @profile: system profile to set at exec (MAYBE NULL to clear value) * * Returns: 0 or error on failure */ int aa_set_current_onexec(struct aa_profile *profile) { struct aa_task_cxt *cxt; struct cred *new = prepare_creds(); if (!new) return -ENOMEM; cxt = new->security; aa_get_profile(profile); aa_put_profile(cxt->onexec); cxt->onexec = profile; commit_creds(new); return 0; } /** * aa_set_current_hat - set the current tasks hat * @profile: profile to set as the current hat (NOT NULL) * @token: token value that must be specified to change from the hat * * Do switch of tasks hat. If the task is currently in a hat * validate the token to match. 
* * Returns: 0 or error on failure */ int aa_set_current_hat(struct aa_profile *profile, u64 token) { struct aa_task_cxt *cxt; struct cred *new = prepare_creds(); if (!new) return -ENOMEM; BUG_ON(!profile); cxt = new->security; if (!cxt->previous) { /* transfer refcount */ cxt->previous = cxt->profile; cxt->token = token; } else if (cxt->token == token) { aa_put_profile(cxt->profile); } else { /* previous_profile && cxt->token != token */ abort_creds(new); return -EACCES; } cxt->profile = aa_get_profile(aa_newest_version(profile)); /* clear exec on switching context */ aa_put_profile(cxt->onexec); cxt->onexec = NULL; commit_creds(new); return 0; } /** * aa_restore_previous_profile - exit from hat context restoring the profile * @token: the token that must be matched to exit hat context * * Attempt to return out of a hat to the previous profile. The token * must match the stored token value. * * Returns: 0 or error of failure */ int aa_restore_previous_profile(u64 token) { struct aa_task_cxt *cxt; struct cred *new = prepare_creds(); if (!new) return -ENOMEM; cxt = new->security; if (cxt->token != token) { abort_creds(new); return -EACCES; } /* ignore restores when there is no saved profile */ if (!cxt->previous) { abort_creds(new); return 0; } aa_put_profile(cxt->profile); cxt->profile = aa_newest_version(cxt->previous); BUG_ON(!cxt->profile); if (unlikely(cxt->profile != cxt->previous)) { aa_get_profile(cxt->profile); aa_put_profile(cxt->previous); } /* clear exec && prev information when restoring to previous context */ cxt->previous = NULL; cxt->token = 0; aa_put_profile(cxt->onexec); cxt->onexec = NULL; commit_creds(new); return 0; }
gpl-2.0
MahoSata/linux-4.3-sata
sound/soc/fsl/fsl_spdif.c
201
36373
/* * Freescale S/PDIF ALSA SoC Digital Audio Interface (DAI) driver * * Copyright (C) 2013 Freescale Semiconductor, Inc. * * Based on stmp3xxx_spdif_dai.c * Vladimir Barinov <vbarinov@embeddedalley.com> * Copyright 2008 SigmaTel, Inc * Copyright 2008 Embedded Alley Solutions, Inc * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/bitrev.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/regmap.h> #include <sound/asoundef.h> #include <sound/dmaengine_pcm.h> #include <sound/soc.h> #include "fsl_spdif.h" #include "imx-pcm.h" #define FSL_SPDIF_TXFIFO_WML 0x8 #define FSL_SPDIF_RXFIFO_WML 0x8 #define INTR_FOR_PLAYBACK (INT_TXFIFO_RESYNC) #define INTR_FOR_CAPTURE (INT_SYM_ERR | INT_BIT_ERR | INT_URX_FUL |\ INT_URX_OV | INT_QRX_FUL | INT_QRX_OV |\ INT_UQ_SYNC | INT_UQ_ERR | INT_RXFIFO_RESYNC |\ INT_LOSS_LOCK | INT_DPLL_LOCKED) #define SIE_INTR_FOR(tx) (tx ? 
INTR_FOR_PLAYBACK : INTR_FOR_CAPTURE) /* Index list for the values that has if (DPLL Locked) condition */ static u8 srpc_dpll_locked[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0xa, 0xb }; #define SRPC_NODPLL_START1 0x5 #define SRPC_NODPLL_START2 0xc #define DEFAULT_RXCLK_SRC 1 /* * SPDIF control structure * Defines channel status, subcode and Q sub */ struct spdif_mixer_control { /* spinlock to access control data */ spinlock_t ctl_lock; /* IEC958 channel tx status bit */ unsigned char ch_status[4]; /* User bits */ unsigned char subcode[2 * SPDIF_UBITS_SIZE]; /* Q subcode part of user bits */ unsigned char qsub[2 * SPDIF_QSUB_SIZE]; /* Buffer offset for U/Q */ u32 upos; u32 qpos; /* Ready buffer index of the two buffers */ u32 ready_buf; }; /** * fsl_spdif_priv: Freescale SPDIF private data * * @fsl_spdif_control: SPDIF control data * @cpu_dai_drv: cpu dai driver * @pdev: platform device pointer * @regmap: regmap handler * @dpll_locked: dpll lock flag * @txrate: the best rates for playback * @txclk_df: STC_TXCLK_DF dividers value for playback * @sysclk_df: STC_SYSCLK_DF dividers value for playback * @txclk_src: STC_TXCLK_SRC values for playback * @rxclk_src: SRPC_CLKSRC_SEL values for capture * @txclk: tx clock sources for playback * @rxclk: rx clock sources for capture * @coreclk: core clock for register access via DMA * @sysclk: system clock for rx clock rate measurement * @dma_params_tx: DMA parameters for transmit channel * @dma_params_rx: DMA parameters for receive channel */ struct fsl_spdif_priv { struct spdif_mixer_control fsl_spdif_control; struct snd_soc_dai_driver cpu_dai_drv; struct platform_device *pdev; struct regmap *regmap; bool dpll_locked; u32 txrate[SPDIF_TXRATE_MAX]; u8 txclk_df[SPDIF_TXRATE_MAX]; u8 sysclk_df[SPDIF_TXRATE_MAX]; u8 txclk_src[SPDIF_TXRATE_MAX]; u8 rxclk_src; struct clk *txclk[SPDIF_TXRATE_MAX]; struct clk *rxclk; struct clk *coreclk; struct clk *sysclk; struct snd_dmaengine_dai_dma_data dma_params_tx; struct snd_dmaengine_dai_dma_data 
dma_params_rx; }; /* DPLL locked and lock loss interrupt handler */ static void spdif_irq_dpll_lock(struct fsl_spdif_priv *spdif_priv) { struct regmap *regmap = spdif_priv->regmap; struct platform_device *pdev = spdif_priv->pdev; u32 locked; regmap_read(regmap, REG_SPDIF_SRPC, &locked); locked &= SRPC_DPLL_LOCKED; dev_dbg(&pdev->dev, "isr: Rx dpll %s \n", locked ? "locked" : "loss lock"); spdif_priv->dpll_locked = locked ? true : false; } /* Receiver found illegal symbol interrupt handler */ static void spdif_irq_sym_error(struct fsl_spdif_priv *spdif_priv) { struct regmap *regmap = spdif_priv->regmap; struct platform_device *pdev = spdif_priv->pdev; dev_dbg(&pdev->dev, "isr: receiver found illegal symbol\n"); /* Clear illegal symbol if DPLL unlocked since no audio stream */ if (!spdif_priv->dpll_locked) regmap_update_bits(regmap, REG_SPDIF_SIE, INT_SYM_ERR, 0); } /* U/Q Channel receive register full */ static void spdif_irq_uqrx_full(struct fsl_spdif_priv *spdif_priv, char name) { struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; struct regmap *regmap = spdif_priv->regmap; struct platform_device *pdev = spdif_priv->pdev; u32 *pos, size, val, reg; switch (name) { case 'U': pos = &ctrl->upos; size = SPDIF_UBITS_SIZE; reg = REG_SPDIF_SRU; break; case 'Q': pos = &ctrl->qpos; size = SPDIF_QSUB_SIZE; reg = REG_SPDIF_SRQ; break; default: dev_err(&pdev->dev, "unsupported channel name\n"); return; } dev_dbg(&pdev->dev, "isr: %c Channel receive register full\n", name); if (*pos >= size * 2) { *pos = 0; } else if (unlikely((*pos % size) + 3 > size)) { dev_err(&pdev->dev, "User bit receivce buffer overflow\n"); return; } regmap_read(regmap, reg, &val); ctrl->subcode[*pos++] = val >> 16; ctrl->subcode[*pos++] = val >> 8; ctrl->subcode[*pos++] = val; } /* U/Q Channel sync found */ static void spdif_irq_uq_sync(struct fsl_spdif_priv *spdif_priv) { struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; struct platform_device *pdev = spdif_priv->pdev; 
dev_dbg(&pdev->dev, "isr: U/Q Channel sync found\n"); /* U/Q buffer reset */ if (ctrl->qpos == 0) return; /* Set ready to this buffer */ ctrl->ready_buf = (ctrl->qpos - 1) / SPDIF_QSUB_SIZE + 1; } /* U/Q Channel framing error */ static void spdif_irq_uq_err(struct fsl_spdif_priv *spdif_priv) { struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; struct regmap *regmap = spdif_priv->regmap; struct platform_device *pdev = spdif_priv->pdev; u32 val; dev_dbg(&pdev->dev, "isr: U/Q Channel framing error\n"); /* Read U/Q data to clear the irq and do buffer reset */ regmap_read(regmap, REG_SPDIF_SRU, &val); regmap_read(regmap, REG_SPDIF_SRQ, &val); /* Drop this U/Q buffer */ ctrl->ready_buf = 0; ctrl->upos = 0; ctrl->qpos = 0; } /* Get spdif interrupt status and clear the interrupt */ static u32 spdif_intr_status_clear(struct fsl_spdif_priv *spdif_priv) { struct regmap *regmap = spdif_priv->regmap; u32 val, val2; regmap_read(regmap, REG_SPDIF_SIS, &val); regmap_read(regmap, REG_SPDIF_SIE, &val2); regmap_write(regmap, REG_SPDIF_SIC, val & val2); return val; } static irqreturn_t spdif_isr(int irq, void *devid) { struct fsl_spdif_priv *spdif_priv = (struct fsl_spdif_priv *)devid; struct platform_device *pdev = spdif_priv->pdev; u32 sis; sis = spdif_intr_status_clear(spdif_priv); if (sis & INT_DPLL_LOCKED) spdif_irq_dpll_lock(spdif_priv); if (sis & INT_TXFIFO_UNOV) dev_dbg(&pdev->dev, "isr: Tx FIFO under/overrun\n"); if (sis & INT_TXFIFO_RESYNC) dev_dbg(&pdev->dev, "isr: Tx FIFO resync\n"); if (sis & INT_CNEW) dev_dbg(&pdev->dev, "isr: cstatus new\n"); if (sis & INT_VAL_NOGOOD) dev_dbg(&pdev->dev, "isr: validity flag no good\n"); if (sis & INT_SYM_ERR) spdif_irq_sym_error(spdif_priv); if (sis & INT_BIT_ERR) dev_dbg(&pdev->dev, "isr: receiver found parity bit error\n"); if (sis & INT_URX_FUL) spdif_irq_uqrx_full(spdif_priv, 'U'); if (sis & INT_URX_OV) dev_dbg(&pdev->dev, "isr: U Channel receive register overrun\n"); if (sis & INT_QRX_FUL) 
spdif_irq_uqrx_full(spdif_priv, 'Q'); if (sis & INT_QRX_OV) dev_dbg(&pdev->dev, "isr: Q Channel receive register overrun\n"); if (sis & INT_UQ_SYNC) spdif_irq_uq_sync(spdif_priv); if (sis & INT_UQ_ERR) spdif_irq_uq_err(spdif_priv); if (sis & INT_RXFIFO_UNOV) dev_dbg(&pdev->dev, "isr: Rx FIFO under/overrun\n"); if (sis & INT_RXFIFO_RESYNC) dev_dbg(&pdev->dev, "isr: Rx FIFO resync\n"); if (sis & INT_LOSS_LOCK) spdif_irq_dpll_lock(spdif_priv); /* FIXME: Write Tx FIFO to clear TxEm */ if (sis & INT_TX_EM) dev_dbg(&pdev->dev, "isr: Tx FIFO empty\n"); /* FIXME: Read Rx FIFO to clear RxFIFOFul */ if (sis & INT_RXFIFO_FUL) dev_dbg(&pdev->dev, "isr: Rx FIFO full\n"); return IRQ_HANDLED; } static int spdif_softreset(struct fsl_spdif_priv *spdif_priv) { struct regmap *regmap = spdif_priv->regmap; u32 val, cycle = 1000; regmap_write(regmap, REG_SPDIF_SCR, SCR_SOFT_RESET); /* * RESET bit would be cleared after finishing its reset procedure, * which typically lasts 8 cycles. 1000 cycles will keep it safe. 
*/ do { regmap_read(regmap, REG_SPDIF_SCR, &val); } while ((val & SCR_SOFT_RESET) && cycle--); if (cycle) return 0; else return -EBUSY; } static void spdif_set_cstatus(struct spdif_mixer_control *ctrl, u8 mask, u8 cstatus) { ctrl->ch_status[3] &= ~mask; ctrl->ch_status[3] |= cstatus & mask; } static void spdif_write_channel_status(struct fsl_spdif_priv *spdif_priv) { struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; struct regmap *regmap = spdif_priv->regmap; struct platform_device *pdev = spdif_priv->pdev; u32 ch_status; ch_status = (bitrev8(ctrl->ch_status[0]) << 16) | (bitrev8(ctrl->ch_status[1]) << 8) | bitrev8(ctrl->ch_status[2]); regmap_write(regmap, REG_SPDIF_STCSCH, ch_status); dev_dbg(&pdev->dev, "STCSCH: 0x%06x\n", ch_status); ch_status = bitrev8(ctrl->ch_status[3]) << 16; regmap_write(regmap, REG_SPDIF_STCSCL, ch_status); dev_dbg(&pdev->dev, "STCSCL: 0x%06x\n", ch_status); } /* Set SPDIF PhaseConfig register for rx clock */ static int spdif_set_rx_clksrc(struct fsl_spdif_priv *spdif_priv, enum spdif_gainsel gainsel, int dpll_locked) { struct regmap *regmap = spdif_priv->regmap; u8 clksrc = spdif_priv->rxclk_src; if (clksrc >= SRPC_CLKSRC_MAX || gainsel >= GAINSEL_MULTI_MAX) return -EINVAL; regmap_update_bits(regmap, REG_SPDIF_SRPC, SRPC_CLKSRC_SEL_MASK | SRPC_GAINSEL_MASK, SRPC_CLKSRC_SEL_SET(clksrc) | SRPC_GAINSEL_SET(gainsel)); return 0; } static int spdif_set_sample_rate(struct snd_pcm_substream *substream, int sample_rate) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; struct regmap *regmap = spdif_priv->regmap; struct platform_device *pdev = spdif_priv->pdev; unsigned long csfs = 0; u32 stc, mask, rate; u8 clk, txclk_df, sysclk_df; int ret; switch (sample_rate) { case 32000: rate = SPDIF_TXRATE_32000; csfs = IEC958_AES3_CON_FS_32000; break; case 44100: rate = SPDIF_TXRATE_44100; 
csfs = IEC958_AES3_CON_FS_44100; break; case 48000: rate = SPDIF_TXRATE_48000; csfs = IEC958_AES3_CON_FS_48000; break; case 96000: rate = SPDIF_TXRATE_96000; csfs = IEC958_AES3_CON_FS_96000; break; case 192000: rate = SPDIF_TXRATE_192000; csfs = IEC958_AES3_CON_FS_192000; break; default: dev_err(&pdev->dev, "unsupported sample rate %d\n", sample_rate); return -EINVAL; } clk = spdif_priv->txclk_src[rate]; if (clk >= STC_TXCLK_SRC_MAX) { dev_err(&pdev->dev, "tx clock source is out of range\n"); return -EINVAL; } txclk_df = spdif_priv->txclk_df[rate]; if (txclk_df == 0) { dev_err(&pdev->dev, "the txclk_df can't be zero\n"); return -EINVAL; } sysclk_df = spdif_priv->sysclk_df[rate]; /* Don't mess up the clocks from other modules */ if (clk != STC_TXCLK_SPDIF_ROOT) goto clk_set_bypass; /* The S/PDIF block needs a clock of 64 * fs * txclk_df */ ret = clk_set_rate(spdif_priv->txclk[rate], 64 * sample_rate * txclk_df); if (ret) { dev_err(&pdev->dev, "failed to set tx clock rate\n"); return ret; } clk_set_bypass: dev_dbg(&pdev->dev, "expected clock rate = %d\n", (64 * sample_rate * txclk_df * sysclk_df)); dev_dbg(&pdev->dev, "actual clock rate = %ld\n", clk_get_rate(spdif_priv->txclk[rate])); /* set fs field in consumer channel status */ spdif_set_cstatus(ctrl, IEC958_AES3_CON_FS, csfs); /* select clock source and divisor */ stc = STC_TXCLK_ALL_EN | STC_TXCLK_SRC_SET(clk) | STC_TXCLK_DF(txclk_df) | STC_SYSCLK_DF(sysclk_df); mask = STC_TXCLK_ALL_EN_MASK | STC_TXCLK_SRC_MASK | STC_TXCLK_DF_MASK | STC_SYSCLK_DF_MASK; regmap_update_bits(regmap, REG_SPDIF_STC, mask, stc); dev_dbg(&pdev->dev, "set sample rate to %dHz for %dHz playback\n", spdif_priv->txrate[rate], sample_rate); return 0; } static int fsl_spdif_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct platform_device *pdev = spdif_priv->pdev; struct regmap 
*regmap = spdif_priv->regmap; u32 scr, mask; int i; int ret; /* Reset module and interrupts only for first initialization */ if (!cpu_dai->active) { ret = clk_prepare_enable(spdif_priv->coreclk); if (ret) { dev_err(&pdev->dev, "failed to enable core clock\n"); return ret; } ret = spdif_softreset(spdif_priv); if (ret) { dev_err(&pdev->dev, "failed to soft reset\n"); goto err; } /* Disable all the interrupts */ regmap_update_bits(regmap, REG_SPDIF_SIE, 0xffffff, 0); } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { scr = SCR_TXFIFO_AUTOSYNC | SCR_TXFIFO_CTRL_NORMAL | SCR_TXSEL_NORMAL | SCR_USRC_SEL_CHIP | SCR_TXFIFO_FSEL_IF8; mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK | SCR_TXSEL_MASK | SCR_USRC_SEL_MASK | SCR_TXFIFO_FSEL_MASK; for (i = 0; i < SPDIF_TXRATE_MAX; i++) { ret = clk_prepare_enable(spdif_priv->txclk[i]); if (ret) goto disable_txclk; } } else { scr = SCR_RXFIFO_FSEL_IF8 | SCR_RXFIFO_AUTOSYNC; mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK| SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK; ret = clk_prepare_enable(spdif_priv->rxclk); if (ret) goto err; } regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr); /* Power up SPDIF module */ regmap_update_bits(regmap, REG_SPDIF_SCR, SCR_LOW_POWER, 0); return 0; disable_txclk: for (i--; i >= 0; i--) clk_disable_unprepare(spdif_priv->txclk[i]); err: clk_disable_unprepare(spdif_priv->coreclk); return ret; } static void fsl_spdif_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct regmap *regmap = spdif_priv->regmap; u32 scr, mask, i; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { scr = 0; mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK | SCR_TXSEL_MASK | SCR_USRC_SEL_MASK | SCR_TXFIFO_FSEL_MASK; for (i = 0; i < SPDIF_TXRATE_MAX; i++) clk_disable_unprepare(spdif_priv->txclk[i]); } else { scr = SCR_RXFIFO_OFF | 
SCR_RXFIFO_CTL_ZERO; mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK| SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK; clk_disable_unprepare(spdif_priv->rxclk); } regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr); /* Power down SPDIF module only if tx&rx are both inactive */ if (!cpu_dai->active) { spdif_intr_status_clear(spdif_priv); regmap_update_bits(regmap, REG_SPDIF_SCR, SCR_LOW_POWER, SCR_LOW_POWER); clk_disable_unprepare(spdif_priv->coreclk); } } static int fsl_spdif_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; struct platform_device *pdev = spdif_priv->pdev; u32 sample_rate = params_rate(params); int ret = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = spdif_set_sample_rate(substream, sample_rate); if (ret) { dev_err(&pdev->dev, "%s: set sample rate failed: %d\n", __func__, sample_rate); return ret; } spdif_set_cstatus(ctrl, IEC958_AES3_CON_CLOCK, IEC958_AES3_CON_CLOCK_1000PPM); spdif_write_channel_status(spdif_priv); } else { /* Setup rx clock source */ ret = spdif_set_rx_clksrc(spdif_priv, SPDIF_DEFAULT_GAINSEL, 1); } return ret; } static int fsl_spdif_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct regmap *regmap = spdif_priv->regmap; bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; u32 intr = SIE_INTR_FOR(tx); u32 dmaen = SCR_DMA_xX_EN(tx); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: regmap_update_bits(regmap, REG_SPDIF_SIE, intr, intr); regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, dmaen); break; case 
SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0); regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0); break; default: return -EINVAL; } return 0; } static struct snd_soc_dai_ops fsl_spdif_dai_ops = { .startup = fsl_spdif_startup, .hw_params = fsl_spdif_hw_params, .trigger = fsl_spdif_trigger, .shutdown = fsl_spdif_shutdown, }; /* * FSL SPDIF IEC958 controller(mixer) functions * * Channel status get/put control * User bit value get/put control * Valid bit value get control * DPLL lock status get control * User bit sync mode selection control */ static int fsl_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int fsl_spdif_pb_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; uvalue->value.iec958.status[0] = ctrl->ch_status[0]; uvalue->value.iec958.status[1] = ctrl->ch_status[1]; uvalue->value.iec958.status[2] = ctrl->ch_status[2]; uvalue->value.iec958.status[3] = ctrl->ch_status[3]; return 0; } static int fsl_spdif_pb_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; ctrl->ch_status[0] = uvalue->value.iec958.status[0]; ctrl->ch_status[1] = uvalue->value.iec958.status[1]; ctrl->ch_status[2] = uvalue->value.iec958.status[2]; ctrl->ch_status[3] = uvalue->value.iec958.status[3]; spdif_write_channel_status(spdif_priv); return 0; } /* Get channel status from SPDIF_RX_CCHAN register */ static int fsl_spdif_capture_get(struct snd_kcontrol *kcontrol, 
struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct regmap *regmap = spdif_priv->regmap; u32 cstatus, val; regmap_read(regmap, REG_SPDIF_SIS, &val); if (!(val & INT_CNEW)) return -EAGAIN; regmap_read(regmap, REG_SPDIF_SRCSH, &cstatus); ucontrol->value.iec958.status[0] = (cstatus >> 16) & 0xFF; ucontrol->value.iec958.status[1] = (cstatus >> 8) & 0xFF; ucontrol->value.iec958.status[2] = cstatus & 0xFF; regmap_read(regmap, REG_SPDIF_SRCSL, &cstatus); ucontrol->value.iec958.status[3] = (cstatus >> 16) & 0xFF; ucontrol->value.iec958.status[4] = (cstatus >> 8) & 0xFF; ucontrol->value.iec958.status[5] = cstatus & 0xFF; /* Clear intr */ regmap_write(regmap, REG_SPDIF_SIC, INT_CNEW); return 0; } /* * Get User bits (subcode) from chip value which readed out * in UChannel register. */ static int fsl_spdif_subcode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; unsigned long flags; int ret = -EAGAIN; spin_lock_irqsave(&ctrl->ctl_lock, flags); if (ctrl->ready_buf) { int idx = (ctrl->ready_buf - 1) * SPDIF_UBITS_SIZE; memcpy(&ucontrol->value.iec958.subcode[0], &ctrl->subcode[idx], SPDIF_UBITS_SIZE); ret = 0; } spin_unlock_irqrestore(&ctrl->ctl_lock, flags); return ret; } /* Q-subcode information. 
The byte size is SPDIF_UBITS_SIZE/8 */ static int fsl_spdif_qinfo(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; uinfo->count = SPDIF_QSUB_SIZE; return 0; } /* Get Q subcode from chip value which readed out in QChannel register */ static int fsl_spdif_qget(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control; unsigned long flags; int ret = -EAGAIN; spin_lock_irqsave(&ctrl->ctl_lock, flags); if (ctrl->ready_buf) { int idx = (ctrl->ready_buf - 1) * SPDIF_QSUB_SIZE; memcpy(&ucontrol->value.bytes.data[0], &ctrl->qsub[idx], SPDIF_QSUB_SIZE); ret = 0; } spin_unlock_irqrestore(&ctrl->ctl_lock, flags); return ret; } /* Valid bit information */ static int fsl_spdif_vbit_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } /* Get valid good bit from interrupt status register */ static int fsl_spdif_vbit_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct regmap *regmap = spdif_priv->regmap; u32 val; regmap_read(regmap, REG_SPDIF_SIS, &val); ucontrol->value.integer.value[0] = (val & INT_VAL_NOGOOD) != 0; regmap_write(regmap, REG_SPDIF_SIC, INT_VAL_NOGOOD); return 0; } /* DPLL lock information */ static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 16000; uinfo->value.integer.max = 96000; return 0; } static u32 gainsel_multi[GAINSEL_MULTI_MAX] = { 24, 16, 12, 8, 6, 4, 3, }; 
/* Get RX data clock rate given the SPDIF bus_clk */ static int spdif_get_rxclk_rate(struct fsl_spdif_priv *spdif_priv, enum spdif_gainsel gainsel) { struct regmap *regmap = spdif_priv->regmap; struct platform_device *pdev = spdif_priv->pdev; u64 tmpval64, busclk_freq = 0; u32 freqmeas, phaseconf; u8 clksrc; regmap_read(regmap, REG_SPDIF_SRFM, &freqmeas); regmap_read(regmap, REG_SPDIF_SRPC, &phaseconf); clksrc = (phaseconf >> SRPC_CLKSRC_SEL_OFFSET) & 0xf; /* Get bus clock from system */ if (srpc_dpll_locked[clksrc] && (phaseconf & SRPC_DPLL_LOCKED)) busclk_freq = clk_get_rate(spdif_priv->sysclk); /* FreqMeas_CLK = (BUS_CLK * FreqMeas) / 2 ^ 10 / GAINSEL / 128 */ tmpval64 = (u64) busclk_freq * freqmeas; do_div(tmpval64, gainsel_multi[gainsel] * 1024); do_div(tmpval64, 128 * 1024); dev_dbg(&pdev->dev, "FreqMeas: %d\n", freqmeas); dev_dbg(&pdev->dev, "BusclkFreq: %lld\n", busclk_freq); dev_dbg(&pdev->dev, "RxRate: %lld\n", tmpval64); return (int)tmpval64; } /* * Get DPLL lock or not info from stable interrupt status register. 
* User application must use this control to get locked, * then can do next PCM operation */ static int fsl_spdif_rxrate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); int rate = 0; if (spdif_priv->dpll_locked) rate = spdif_get_rxclk_rate(spdif_priv, SPDIF_DEFAULT_GAINSEL); ucontrol->value.integer.value[0] = rate; return 0; } /* User bit sync mode info */ static int fsl_spdif_usync_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 1; return 0; } /* * User bit sync mode: * 1 CD User channel subcode * 0 Non-CD data */ static int fsl_spdif_usync_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct regmap *regmap = spdif_priv->regmap; u32 val; regmap_read(regmap, REG_SPDIF_SRCD, &val); ucontrol->value.integer.value[0] = (val & SRCD_CD_USER) != 0; return 0; } /* * User bit sync mode: * 1 CD User channel subcode * 0 Non-CD data */ static int fsl_spdif_usync_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol); struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai); struct regmap *regmap = spdif_priv->regmap; u32 val = ucontrol->value.integer.value[0] << SRCD_CD_USER_OFFSET; regmap_update_bits(regmap, REG_SPDIF_SRCD, SRCD_CD_USER, val); return 0; } /* FSL SPDIF IEC958 controller defines */ static struct snd_kcontrol_new fsl_spdif_ctrls[] = { /* Status cchanel controller */ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE | 
SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = fsl_spdif_info, .get = fsl_spdif_pb_get, .put = fsl_spdif_pb_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT), .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = fsl_spdif_info, .get = fsl_spdif_capture_get, }, /* User bits controller */ { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "IEC958 Subcode Capture Default", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = fsl_spdif_info, .get = fsl_spdif_subcode_get, }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "IEC958 Q-subcode Capture Default", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = fsl_spdif_qinfo, .get = fsl_spdif_qget, }, /* Valid bit error controller */ { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "IEC958 V-Bit Errors", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = fsl_spdif_vbit_info, .get = fsl_spdif_vbit_get, }, /* DPLL lock info get controller */ { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "RX Sample Rate", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = fsl_spdif_rxrate_info, .get = fsl_spdif_rxrate_get, }, /* User bit sync mode set/get controller */ { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "IEC958 USyncMode CDText", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = fsl_spdif_usync_info, .get = fsl_spdif_usync_get, .put = fsl_spdif_usync_put, }, }; static int fsl_spdif_dai_probe(struct snd_soc_dai *dai) { struct fsl_spdif_priv *spdif_private = snd_soc_dai_get_drvdata(dai); snd_soc_dai_init_dma_data(dai, &spdif_private->dma_params_tx, &spdif_private->dma_params_rx); snd_soc_add_dai_controls(dai, fsl_spdif_ctrls, ARRAY_SIZE(fsl_spdif_ctrls)); return 0; } static struct snd_soc_dai_driver fsl_spdif_dai = { .probe = &fsl_spdif_dai_probe, .playback = { .stream_name = "CPU-Playback", .channels_min = 2, 
.channels_max = 2, .rates = FSL_SPDIF_RATES_PLAYBACK, .formats = FSL_SPDIF_FORMATS_PLAYBACK, }, .capture = { .stream_name = "CPU-Capture", .channels_min = 2, .channels_max = 2, .rates = FSL_SPDIF_RATES_CAPTURE, .formats = FSL_SPDIF_FORMATS_CAPTURE, }, .ops = &fsl_spdif_dai_ops, }; static const struct snd_soc_component_driver fsl_spdif_component = { .name = "fsl-spdif", }; /* FSL SPDIF REGMAP */ static bool fsl_spdif_readable_reg(struct device *dev, unsigned int reg) { switch (reg) { case REG_SPDIF_SCR: case REG_SPDIF_SRCD: case REG_SPDIF_SRPC: case REG_SPDIF_SIE: case REG_SPDIF_SIS: case REG_SPDIF_SRL: case REG_SPDIF_SRR: case REG_SPDIF_SRCSH: case REG_SPDIF_SRCSL: case REG_SPDIF_SRU: case REG_SPDIF_SRQ: case REG_SPDIF_STCSCH: case REG_SPDIF_STCSCL: case REG_SPDIF_SRFM: case REG_SPDIF_STC: return true; default: return false; } } static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { case REG_SPDIF_SCR: case REG_SPDIF_SRCD: case REG_SPDIF_SRPC: case REG_SPDIF_SIE: case REG_SPDIF_SIC: case REG_SPDIF_STL: case REG_SPDIF_STR: case REG_SPDIF_STCSCH: case REG_SPDIF_STCSCL: case REG_SPDIF_STC: return true; default: return false; } } static const struct regmap_config fsl_spdif_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .max_register = REG_SPDIF_STC, .readable_reg = fsl_spdif_readable_reg, .writeable_reg = fsl_spdif_writeable_reg, }; static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv, struct clk *clk, u64 savesub, enum spdif_txrate index, bool round) { const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk); u64 rate_ideal, rate_actual, sub; u32 sysclk_dfmin, sysclk_dfmax; u32 txclk_df, sysclk_df, arate; /* The sysclk has an extra divisor [2, 512] */ sysclk_dfmin = is_sysclk ? 2 : 1; sysclk_dfmax = is_sysclk ? 
512 : 1; for (sysclk_df = sysclk_dfmin; sysclk_df <= sysclk_dfmax; sysclk_df++) { for (txclk_df = 1; txclk_df <= 128; txclk_df++) { rate_ideal = rate[index] * txclk_df * 64; if (round) rate_actual = clk_round_rate(clk, rate_ideal); else rate_actual = clk_get_rate(clk); arate = rate_actual / 64; arate /= txclk_df * sysclk_df; if (arate == rate[index]) { /* We are lucky */ savesub = 0; spdif_priv->txclk_df[index] = txclk_df; spdif_priv->sysclk_df[index] = sysclk_df; spdif_priv->txrate[index] = arate; goto out; } else if (arate / rate[index] == 1) { /* A little bigger than expect */ sub = (u64)(arate - rate[index]) * 100000; do_div(sub, rate[index]); if (sub >= savesub) continue; savesub = sub; spdif_priv->txclk_df[index] = txclk_df; spdif_priv->sysclk_df[index] = sysclk_df; spdif_priv->txrate[index] = arate; } else if (rate[index] / arate == 1) { /* A little smaller than expect */ sub = (u64)(rate[index] - arate) * 100000; do_div(sub, rate[index]); if (sub >= savesub) continue; savesub = sub; spdif_priv->txclk_df[index] = txclk_df; spdif_priv->sysclk_df[index] = sysclk_df; spdif_priv->txrate[index] = arate; } } } out: return savesub; } static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv, enum spdif_txrate index) { const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 }; struct platform_device *pdev = spdif_priv->pdev; struct device *dev = &pdev->dev; u64 savesub = 100000, ret; struct clk *clk; char tmp[16]; int i; for (i = 0; i < STC_TXCLK_SRC_MAX; i++) { sprintf(tmp, "rxtx%d", i); clk = devm_clk_get(&pdev->dev, tmp); if (IS_ERR(clk)) { dev_err(dev, "no rxtx%d clock in devicetree\n", i); return PTR_ERR(clk); } if (!clk_get_rate(clk)) continue; ret = fsl_spdif_txclk_caldiv(spdif_priv, clk, savesub, index, i == STC_TXCLK_SPDIF_ROOT); if (savesub == ret) continue; savesub = ret; spdif_priv->txclk[index] = clk; spdif_priv->txclk_src[index] = i; /* To quick catch a divisor, we allow a 0.1% deviation */ if (savesub < 100) break; } dev_dbg(&pdev->dev, "use 
rxtx%d as tx clock source for %dHz sample rate\n", spdif_priv->txclk_src[index], rate[index]); dev_dbg(&pdev->dev, "use txclk df %d for %dHz sample rate\n", spdif_priv->txclk_df[index], rate[index]); if (clk_is_match(spdif_priv->txclk[index], spdif_priv->sysclk)) dev_dbg(&pdev->dev, "use sysclk df %d for %dHz sample rate\n", spdif_priv->sysclk_df[index], rate[index]); dev_dbg(&pdev->dev, "the best rate for %dHz sample rate is %dHz\n", rate[index], spdif_priv->txrate[index]); return 0; } static int fsl_spdif_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct fsl_spdif_priv *spdif_priv; struct spdif_mixer_control *ctrl; struct resource *res; void __iomem *regs; int irq, ret, i; if (!np) return -ENODEV; spdif_priv = devm_kzalloc(&pdev->dev, sizeof(*spdif_priv), GFP_KERNEL); if (!spdif_priv) return -ENOMEM; spdif_priv->pdev = pdev; /* Initialize this copy of the CPU DAI driver structure */ memcpy(&spdif_priv->cpu_dai_drv, &fsl_spdif_dai, sizeof(fsl_spdif_dai)); spdif_priv->cpu_dai_drv.name = dev_name(&pdev->dev); /* Get the addresses and IRQ */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); spdif_priv->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "core", regs, &fsl_spdif_regmap_config); if (IS_ERR(spdif_priv->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); return PTR_ERR(spdif_priv->regmap); } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq for node %s\n", pdev->name); return irq; } ret = devm_request_irq(&pdev->dev, irq, spdif_isr, 0, dev_name(&pdev->dev), spdif_priv); if (ret) { dev_err(&pdev->dev, "could not claim irq %u\n", irq); return ret; } /* Get system clock for rx clock rate calculation */ spdif_priv->sysclk = devm_clk_get(&pdev->dev, "rxtx5"); if (IS_ERR(spdif_priv->sysclk)) { dev_err(&pdev->dev, "no sys clock (rxtx5) in devicetree\n"); return PTR_ERR(spdif_priv->sysclk); } /* Get core clock 
for data register access via DMA */ spdif_priv->coreclk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(spdif_priv->coreclk)) { dev_err(&pdev->dev, "no core clock in devicetree\n"); return PTR_ERR(spdif_priv->coreclk); } /* Select clock source for rx/tx clock */ spdif_priv->rxclk = devm_clk_get(&pdev->dev, "rxtx1"); if (IS_ERR(spdif_priv->rxclk)) { dev_err(&pdev->dev, "no rxtx1 clock in devicetree\n"); return PTR_ERR(spdif_priv->rxclk); } spdif_priv->rxclk_src = DEFAULT_RXCLK_SRC; for (i = 0; i < SPDIF_TXRATE_MAX; i++) { ret = fsl_spdif_probe_txclk(spdif_priv, i); if (ret) return ret; } /* Initial spinlock for control data */ ctrl = &spdif_priv->fsl_spdif_control; spin_lock_init(&ctrl->ctl_lock); /* Init tx channel status default value */ ctrl->ch_status[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_5015; ctrl->ch_status[1] = IEC958_AES1_CON_DIGDIGCONV_ID; ctrl->ch_status[2] = 0x00; ctrl->ch_status[3] = IEC958_AES3_CON_FS_44100 | IEC958_AES3_CON_CLOCK_1000PPM; spdif_priv->dpll_locked = false; spdif_priv->dma_params_tx.maxburst = FSL_SPDIF_TXFIFO_WML; spdif_priv->dma_params_rx.maxburst = FSL_SPDIF_RXFIFO_WML; spdif_priv->dma_params_tx.addr = res->start + REG_SPDIF_STL; spdif_priv->dma_params_rx.addr = res->start + REG_SPDIF_SRL; /* Register with ASoC */ dev_set_drvdata(&pdev->dev, spdif_priv); ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component, &spdif_priv->cpu_dai_drv, 1); if (ret) { dev_err(&pdev->dev, "failed to register DAI: %d\n", ret); return ret; } ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE); if (ret) dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret); return ret; } static const struct of_device_id fsl_spdif_dt_ids[] = { { .compatible = "fsl,imx35-spdif", }, { .compatible = "fsl,vf610-spdif", }, {} }; MODULE_DEVICE_TABLE(of, fsl_spdif_dt_ids); static struct platform_driver fsl_spdif_driver = { .driver = { .name = "fsl-spdif-dai", .of_match_table = fsl_spdif_dt_ids, }, .probe = fsl_spdif_probe, }; 
/*
 * Register the driver with the platform bus; module_platform_driver()
 * generates the module init/exit functions that call
 * platform_driver_register()/unregister() for fsl_spdif_driver.
 */
module_platform_driver(fsl_spdif_driver);

MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("Freescale S/PDIF CPU DAI Driver");
MODULE_LICENSE("GPL v2");
/* Allow autoloading when a "fsl-spdif-dai" platform device is created. */
MODULE_ALIAS("platform:fsl-spdif-dai");
gpl-2.0
chenyu105/linux
drivers/gpu/drm/nouveau/dispnv04/arb.c
457
7968
/*
 * Copyright 1993-2003 NVIDIA, Corporation
 * Copyright 2007-2009 Stuart Bennett
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <drm/drmP.h>

#include "nouveau_drm.h"
#include "nouveau_reg.h"
#include "hw.h"

/****************************************************************************\
*                                                                            *
*  The video arbitration routines calculate some "magic" numbers.  Fixes    *
*  the snow seen when accessing the framebuffer without it.                  *
*  It just works (I hope).                                                   *
*                                                                            *
\****************************************************************************/

/* Result of an arbitration calculation: CRTC FIFO settings. */
struct nv_fifo_info {
	int lwm;	/* low watermark, in bytes */
	int burst;	/* burst length, in bytes */
};

/* Inputs to the arbitration calculation. */
struct nv_sim_state {
	int pclk_khz;		/* pixel clock */
	int mclk_khz;		/* memory clock */
	int nvclk_khz;		/* core clock */
	int bpp;		/* bits per pixel */
	int mem_page_miss;	/* page-miss penalty, memory clocks */
	int mem_latency;	/* CAS latency, memory clocks */
	int memory_type;	/* 0 = SDRAM-like (slower accumulation) */
	int memory_width;	/* memory bus width, bits */
	int two_heads;		/* non-zero if both CRTCs may be active */
};

/*
 * Compute FIFO watermark/burst for NV04 (TNT) class hardware.
 *
 * Iteratively relaxes the extra memory-clock latency (mclk_extra) until
 * the FIFO can be refilled faster than the CRTC drains it, then reports
 * the resulting low watermark and a fixed 128-byte burst.
 */
static void
nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int pagemiss, cas, width, bpp;
	int nvclks, mclks, pclks, crtpagemiss;
	int found, mclk_extra, mclk_loop, cbs, m1, p1;
	int mclk_freq, pclk_freq, nvclk_freq;
	int us_m, us_n, us_p, crtc_drain_rate;
	int cpm_us, us_crt, clwm;

	pclk_freq = arb->pclk_khz;
	mclk_freq = arb->mclk_khz;
	nvclk_freq = arb->nvclk_khz;
	pagemiss = arb->mem_page_miss;
	cas = arb->mem_latency;
	width = arb->memory_width >> 6;
	bpp = arb->bpp;
	cbs = 128;	/* fixed CRTC burst size, bytes */

	pclks = 2;
	nvclks = 10;
	mclks = 13 + cas;
	mclk_extra = 3;
	found = 0;

	while (!found) {
		found = 1;

		mclk_loop = mclks + mclk_extra;
		/* Latencies in microseconds for each clock domain. */
		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
		us_n = nvclks * 1000 * 1000 / nvclk_freq;
		us_p = nvclks * 1000 * 1000 / pclk_freq;

		crtc_drain_rate = pclk_freq * bpp / 8;	/* B/us scaled */
		crtpagemiss = 2;
		crtpagemiss += 1;
		cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
		us_crt = cpm_us + us_m + us_n + us_p;
		clwm = us_crt * crtc_drain_rate / (1000 * 1000);
		clwm++;

		m1 = clwm + cbs - 512;
		p1 = m1 * pclk_freq / mclk_freq;
		p1 = p1 * bpp / 8;
		/* Refill can't keep up (or watermark too high): retry with
		 * one less mclk of slack; give up when slack is exhausted. */
		if ((p1 < m1 && m1 > 0) || clwm > 519) {
			found = !mclk_extra;
			mclk_extra--;
		}
		if (clwm < 384)
			clwm = 384;

		fifo->lwm = clwm;
		fifo->burst = cbs;
	}
}

/*
 * Compute FIFO watermark/burst for NV10+ hardware.
 *
 * Models fixed and conditional FIFO refill latencies against the CRTC
 * drain rate, derives the largest burst that neither overflows the FIFO
 * nor exceeds an 80ns latency budget, and places the low watermark 10%
 * of the way between the computed minimum and maximum.
 */
static void
nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
{
	int fill_rate, drain_rate;
	int pclks, nvclks, mclks, xclks;
	int pclk_freq, nvclk_freq, mclk_freq;
	int fill_lat, extra_lat;
	int max_burst_o, max_burst_l;
	int fifo_len, min_lwm, max_lwm;
	const int burst_lat = 80; /* Maximum allowable latency due
				   * to the CRTC FIFO burst. (ns) */

	pclk_freq = arb->pclk_khz;
	nvclk_freq = arb->nvclk_khz;
	mclk_freq = arb->mclk_khz;

	fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
	drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */

	fifo_len = arb->two_heads ? 1536 : 1024; /* B */

	/* Fixed FIFO refill latency. */

	pclks = 4;	/* lwm detect. */

	nvclks = 3	/* lwm -> sync. */
		+ 2	/* fbi bus cycles (1 req + 1 busy) */
		+ 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 1	/* fbi_d_rdv_n */
		+ 1	/* Fbi_d_rdata */
		+ 1;	/* crtfifo load */

	mclks = 1	/* 2 edge sync.  may be very close to edge so
			 * just put one. */
		+ 1	/* arb_hp_req */
		+ 5	/* tiling pipeline */
		+ 2	/* latency fifo */
		+ 2	/* memory request to fbio block */
		+ 7;	/* data returned from fbio block */

	/* Need to accumulate 256 bits for read */
	mclks += (arb->memory_type == 0 ? 2 : 1)
		* arb->memory_width / 32;

	fill_lat = mclks * 1000 * 1000 / mclk_freq   /* minimum mclk latency */
		+ nvclks * 1000 * 1000 / nvclk_freq  /* nvclk latency */
		+ pclks * 1000 * 1000 / pclk_freq;   /* pclk latency */

	/* Conditional FIFO refill latency. */

	xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
						* the overlay. */
		+ 2 * arb->mem_page_miss       /* Extra pagemiss latency. */
		+ (arb->bpp == 32 ? 8 : 4);    /* Margin of error. */

	extra_lat = xclks * 1000 * 1000 / mclk_freq;

	if (arb->two_heads)
		/* Account for another CRTC. */
		extra_lat += fill_lat + extra_lat + burst_lat;

	/* FIFO burst */

	/* Max burst not leading to overflows. */
	max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
		* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
	fifo->burst = min(max_burst_o, 1024);

	/* Max burst value with an acceptable latency. */
	max_burst_l = burst_lat * fill_rate / (1000 * 1000);
	fifo->burst = min(max_burst_l, fifo->burst);

	fifo->burst = rounddown_pow_of_two(fifo->burst);

	/* FIFO low watermark */

	min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
	max_lwm = fifo_len - fifo->burst
		+ fill_lat * drain_rate / (1000 * 1000)
		+ fifo->burst * drain_rate / fill_rate;

	fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
}

/*
 * Gather chip/memory parameters and run the appropriate arbitration
 * calculation for NV04..NV1x class hardware, returning the encoded
 * burst (log2 of 16-byte units) and watermark (8-byte units) values.
 *
 * Fixed here: pci_get_bus_and_slot() returns a reference-counted
 * struct pci_dev which the original code leaked on every call, and the
 * (possibly NULL) result was passed straight to pci_read_config_dword().
 */
static void
nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
		int *burst, int *lwm)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_object *device = &nouveau_drm(dev)->device.object;
	struct nv_fifo_info fifo_data;
	struct nv_sim_state sim_data;
	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
	int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
	uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);

	sim_data.pclk_khz = VClk;
	sim_data.mclk_khz = MClk;
	sim_data.nvclk_khz = NVClk;
	sim_data.bpp = bpp;
	sim_data.two_heads = nv_two_heads(dev);
	if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
	    (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
		struct pci_dev *igp = pci_get_bus_and_slot(0, 1);
		uint32_t type = 0;

		if (igp) {
			pci_read_config_dword(igp, 0x7c, &type);
			/* pci_get_bus_and_slot() took a reference on the
			 * bridge device; release it now that we're done. */
			pci_dev_put(igp);
		}

		sim_data.memory_type = (type >> 12) & 1;
		sim_data.memory_width = 64;
		sim_data.mem_latency = 3;
		sim_data.mem_page_miss = 10;
	} else {
		sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1;
		sim_data.memory_width =
			(nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ?
			128 : 64;
		sim_data.mem_latency = cfg1 & 0xf;
		sim_data.mem_page_miss =
			((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
	}

	if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
		nv04_calc_arb(&fifo_data, &sim_data);
	else
		nv10_calc_arb(&fifo_data, &sim_data);

	*burst = ilog2(fifo_data.burst >> 4);
	*lwm = fifo_data.lwm >> 3;
}

/*
 * NV20+ uses fixed arbitration values: 2KB FIFO, 512B burst,
 * watermark at the remaining 1536B.
 */
static void
nv20_update_arb(int *burst, int *lwm)
{
	unsigned int fifo_size, burst_size, graphics_lwm;

	fifo_size = 2048;
	burst_size = 512;
	graphics_lwm = fifo_size - burst_size;

	*burst = ilog2(burst_size >> 5);
	*lwm = graphics_lwm >> 3;
}

/*
 * Public entry point: pick the arbitration strategy for the chip family.
 * C51/C512 IGPs get hardcoded values; pre-Kelvin parts are computed,
 * everything else uses the fixed NV20 settings.
 */
void
nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp,
		 int *burst, int *lwm)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
		nv04_update_arb(dev, vclk, bpp, burst, lwm);
	else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
		 (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
		*burst = 128;
		*lwm = 0x0480;
	} else
		nv20_update_arb(burst, lwm);
}
gpl-2.0
Renesas-EMEV2/Kernel
arch/arm/mach-realview/realview_pb1176.c
713
10790
/*
 *  linux/arch/arm/mach-realview/realview_pb1176.c
 *
 *  Copyright (C) 2008 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/amba/mmci.h>
#include <linux/io.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>

#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>

#include <mach/board-pb1176.h>
#include <mach/irqs.h>

#include "core.h"

/*
 * Static virtual->physical mappings for peripherals needed before
 * ioremap() is available (system regs, GICs, sysctl, timers, L2 cache).
 */
static struct map_desc realview_pb1176_io_desc[] __initdata = {
	{
		.virtual	= IO_ADDRESS(REALVIEW_SYS_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_SYS_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB1176_GIC_CPU_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB1176_GIC_CPU_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB1176_GIC_DIST_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB1176_GIC_DIST_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_DC1176_GIC_CPU_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_DC1176_GIC_CPU_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_DC1176_GIC_DIST_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_DC1176_GIC_DIST_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_SCTL_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_SCTL_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB1176_TIMER0_1_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB1176_TIMER0_1_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB1176_TIMER2_3_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB1176_TIMER2_3_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= IO_ADDRESS(REALVIEW_PB1176_L220_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB1176_L220_BASE),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	},
#ifdef CONFIG_DEBUG_LL
	{
		.virtual	= IO_ADDRESS(REALVIEW_PB1176_UART0_BASE),
		.pfn		= __phys_to_pfn(REALVIEW_PB1176_UART0_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
#endif
};

/* Install the static I/O mappings above. */
static void __init realview_pb1176_map_io(void)
{
	iotable_init(realview_pb1176_io_desc, ARRAY_SIZE(realview_pb1176_io_desc));
}

/* PL061 GPIO controllers: 8 GPIOs each, no cascaded GPIO interrupts. */
static struct pl061_platform_data gpio0_plat_data = {
	.gpio_base	= 0,
	.irq_base	= -1,
};

static struct pl061_platform_data gpio1_plat_data = {
	.gpio_base	= 8,
	.irq_base	= -1,
};

static struct pl061_platform_data gpio2_plat_data = {
	.gpio_base	= 16,
	.irq_base	= -1,
};

/*
 * RealView PB1176 AMBA devices
 *
 * Each peripheral gets an {irq0, irq1} pair and a {tx, rx} DMA request
 * pair, consumed by the AMBA_DEVICE() helper below.
 */

#define GPIO2_IRQ	{ IRQ_PB1176_GPIO2, NO_IRQ }
#define GPIO2_DMA	{ 0, 0 }
#define GPIO3_IRQ	{ IRQ_PB1176_GPIO3, NO_IRQ }
#define GPIO3_DMA	{ 0, 0 }
#define AACI_IRQ	{ IRQ_PB1176_AACI, NO_IRQ }
#define AACI_DMA	{ 0x80, 0x81 }
#define MMCI0_IRQ	{ IRQ_PB1176_MMCI0A, IRQ_PB1176_MMCI0B }
#define MMCI0_DMA	{ 0x84, 0 }
#define KMI0_IRQ	{ IRQ_PB1176_KMI0, NO_IRQ }
#define KMI0_DMA	{ 0, 0 }
#define KMI1_IRQ	{ IRQ_PB1176_KMI1, NO_IRQ }
#define KMI1_DMA	{ 0, 0 }
#define PB1176_SMC_IRQ	{ NO_IRQ, NO_IRQ }
#define PB1176_SMC_DMA	{ 0, 0 }
#define MPMC_IRQ	{ NO_IRQ, NO_IRQ }
#define MPMC_DMA	{ 0, 0 }
#define PB1176_CLCD_IRQ	{ IRQ_DC1176_CLCD, NO_IRQ }
#define PB1176_CLCD_DMA	{ 0, 0 }
#define DMAC_IRQ	{ IRQ_PB1176_DMAC, NO_IRQ }
#define DMAC_DMA	{ 0, 0 }
#define SCTL_IRQ	{ NO_IRQ, NO_IRQ }
#define SCTL_DMA	{ 0, 0 }
#define PB1176_WATCHDOG_IRQ	{ IRQ_DC1176_WATCHDOG, NO_IRQ }
#define PB1176_WATCHDOG_DMA	{ 0, 0 }
#define PB1176_GPIO0_IRQ	{ IRQ_PB1176_GPIO0, NO_IRQ }
#define PB1176_GPIO0_DMA	{ 0, 0 }
#define GPIO1_IRQ	{ IRQ_PB1176_GPIO1, NO_IRQ }
#define GPIO1_DMA	{ 0, 0 }
#define PB1176_RTC_IRQ	{ IRQ_DC1176_RTC, NO_IRQ }
#define PB1176_RTC_DMA	{ 0, 0 }
#define SCI_IRQ		{ IRQ_PB1176_SCI, NO_IRQ }
#define SCI_DMA		{ 7, 6 }
#define PB1176_UART0_IRQ	{ IRQ_DC1176_UART0, NO_IRQ }
#define PB1176_UART0_DMA	{ 15, 14 }
#define PB1176_UART1_IRQ	{ IRQ_DC1176_UART1, NO_IRQ }
#define PB1176_UART1_DMA	{ 13, 12 }
#define PB1176_UART2_IRQ	{ IRQ_DC1176_UART2, NO_IRQ }
#define PB1176_UART2_DMA	{ 11, 10 }
#define PB1176_UART3_IRQ	{ IRQ_DC1176_UART3, NO_IRQ }
#define PB1176_UART3_DMA	{ 0x86, 0x87 }
#define PB1176_SSP_IRQ	{ IRQ_PB1176_SSP, NO_IRQ }
#define PB1176_SSP_DMA	{ 9, 8 }

/* FPGA Primecells */
AMBA_DEVICE(aaci,	"fpga:aaci",	AACI,		NULL);
AMBA_DEVICE(mmc0,	"fpga:mmc0",	MMCI0,		&realview_mmc0_plat_data);
AMBA_DEVICE(kmi0,	"fpga:kmi0",	KMI0,		NULL);
AMBA_DEVICE(kmi1,	"fpga:kmi1",	KMI1,		NULL);
AMBA_DEVICE(uart3,	"fpga:uart3",	PB1176_UART3,	NULL);

/* DevChip Primecells */
AMBA_DEVICE(smc,	"dev:smc",	PB1176_SMC,	NULL);
AMBA_DEVICE(sctl,	"dev:sctl",	SCTL,		NULL);
AMBA_DEVICE(wdog,	"dev:wdog",	PB1176_WATCHDOG,	NULL);
AMBA_DEVICE(gpio0,	"dev:gpio0",	PB1176_GPIO0,	&gpio0_plat_data);
AMBA_DEVICE(gpio1,	"dev:gpio1",	GPIO1,		&gpio1_plat_data);
AMBA_DEVICE(gpio2,	"dev:gpio2",	GPIO2,		&gpio2_plat_data);
AMBA_DEVICE(rtc,	"dev:rtc",	PB1176_RTC,	NULL);
AMBA_DEVICE(sci0,	"dev:sci0",	SCI,		NULL);
AMBA_DEVICE(uart0,	"dev:uart0",	PB1176_UART0,	NULL);
AMBA_DEVICE(uart1,	"dev:uart1",	PB1176_UART1,	NULL);
AMBA_DEVICE(uart2,	"dev:uart2",	PB1176_UART2,	NULL);
AMBA_DEVICE(ssp0,	"dev:ssp0",	PB1176_SSP,	NULL);

/* Primecells on the NEC ISSP chip */
AMBA_DEVICE(clcd,	"issp:clcd",	PB1176_CLCD,	&clcd_plat_data);
//AMBA_DEVICE(dmac,	"issp:dmac",	PB1176_DMAC,	NULL);

static struct amba_device *amba_devs[] __initdata = {
//	&dmac_device,
	&uart0_device,
	&uart1_device,
	&uart2_device,
	&uart3_device,
	&smc_device,
	&clcd_device,
	&sctl_device,
	&wdog_device,
	&gpio0_device,
	&gpio1_device,
	&gpio2_device,
	&rtc_device,
	&sci0_device,
	&ssp0_device,
	&aaci_device,
	&mmc0_device,
	&kmi0_device,
	&kmi1_device,
};

/*
 * RealView PB1176 platform devices
 */
static struct resource realview_pb1176_flash_resources[] = {
	[0] = {
		.start		= REALVIEW_PB1176_FLASH_BASE,
		.end		= REALVIEW_PB1176_FLASH_BASE +
				  REALVIEW_PB1176_FLASH_SIZE - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= REALVIEW_PB1176_SEC_FLASH_BASE,
		.end		= REALVIEW_PB1176_SEC_FLASH_BASE +
				  REALVIEW_PB1176_SEC_FLASH_SIZE - 1,
		.flags		= IORESOURCE_MEM,
	},
};

/* Secure flash bank is only exposed when the config option is enabled. */
#ifdef CONFIG_REALVIEW_PB1176_SECURE_FLASH
#define PB1176_FLASH_BLOCKS	2
#else
#define PB1176_FLASH_BLOCKS	1
#endif

static struct resource realview_pb1176_smsc911x_resources[] = {
	[0] = {
		.start		= REALVIEW_PB1176_ETH_BASE,
		.end		= REALVIEW_PB1176_ETH_BASE + SZ_64K - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= IRQ_PB1176_ETH,
		.end		= IRQ_PB1176_ETH,
		.flags		= IORESOURCE_IRQ,
	},
};

static struct resource realview_pb1176_isp1761_resources[] = {
	[0] = {
		.start		= REALVIEW_PB1176_USB_BASE,
		.end		= REALVIEW_PB1176_USB_BASE + SZ_128K - 1,
		.flags		= IORESOURCE_MEM,
	},
	[1] = {
		.start		= IRQ_PB1176_USB,
		.end		= IRQ_PB1176_USB,
		.flags		= IORESOURCE_IRQ,
	},
};

static struct resource pmu_resource = {
	.start		= IRQ_DC1176_CORE_PMU,
	.end		= IRQ_DC1176_CORE_PMU,
	.flags		= IORESOURCE_IRQ,
};

static struct platform_device pmu_device = {
	.name			= "arm-pmu",
	.id			= ARM_PMU_DEVICE_CPU,
	.num_resources		= 1,
	.resource		= &pmu_resource,
};

/*
 * Bring up the two cascaded GICs: the on-core DC1176 GIC is primary,
 * the board-level PB1176 GIC is cascaded into it via IRQ_DC1176_PB_IRQ1.
 */
static void __init gic_init_irq(void)
{
	/* ARM1176 DevChip GIC, primary */
	gic_cpu_base_addr = __io_address(REALVIEW_DC1176_GIC_CPU_BASE);
	gic_dist_init(0, __io_address(REALVIEW_DC1176_GIC_DIST_BASE),
		      IRQ_DC1176_GIC_START);
	gic_cpu_init(0, gic_cpu_base_addr);

	/* board GIC, secondary */
	gic_dist_init(1, __io_address(REALVIEW_PB1176_GIC_DIST_BASE),
		      IRQ_PB1176_GIC_START);
	gic_cpu_init(1, __io_address(REALVIEW_PB1176_GIC_CPU_BASE));
	gic_cascade_irq(1, IRQ_DC1176_PB_IRQ1);
}

/* Point the shared RealView timer code at this board's SP804 blocks. */
static void __init realview_pb1176_timer_init(void)
{
	timer0_va_base = __io_address(REALVIEW_PB1176_TIMER0_1_BASE);
	timer1_va_base = __io_address(REALVIEW_PB1176_TIMER0_1_BASE) + 0x20;
	timer2_va_base = __io_address(REALVIEW_PB1176_TIMER2_3_BASE);
	timer3_va_base = __io_address(REALVIEW_PB1176_TIMER2_3_BASE) + 0x20;

	realview_timer_init(IRQ_DC1176_TIMER0);
}

static struct sys_timer realview_pb1176_timer = {
	.init		= realview_pb1176_timer_init,
};

/*
 * Board reset: unlock the system registers, then trigger a soft reset.
 * The 'mode' argument is ignored on this board.
 */
static void realview_pb1176_reset(char mode)
{
	void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
	void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
	__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
	__raw_writel(REALVIEW_PB1176_SYS_SOFT_RESET, reset_ctrl);
}

static void realview_pb1176_fixup(struct machine_desc *mdesc,
				  struct tag *tags, char **from,
				  struct meminfo *meminfo)
{
	/*
	 * RealView PB1176 only has 128MB of RAM mapped at 0.
	 */
	meminfo->bank[0].start = 0;
	meminfo->bank[0].size = SZ_128M;
	meminfo->nr_banks = 1;
}

/* Machine init: L2 cache, platform devices, AMBA devices, LEDs, reset hook. */
static void __init realview_pb1176_init(void)
{
	int i;

#ifdef CONFIG_CACHE_L2X0
	/* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */
	l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
#endif

	realview_flash_register(realview_pb1176_flash_resources,
				PB1176_FLASH_BLOCKS);
	realview_eth_register(NULL, realview_pb1176_smsc911x_resources);
	platform_device_register(&realview_i2c_device);
	realview_usb_register(realview_pb1176_isp1761_resources);
	platform_device_register(&pmu_device);

	for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
		struct amba_device *d = amba_devs[i];
		amba_device_register(d, &iomem_resource);
	}

#ifdef CONFIG_LEDS
	leds_event = realview_leds_event;
#endif
	realview_reset = realview_pb1176_reset;
}

MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
	/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
	.phys_io	= REALVIEW_PB1176_UART0_BASE & SECTION_MASK,
	.io_pg_offst	= (IO_ADDRESS(REALVIEW_PB1176_UART0_BASE) >> 18) & 0xfffc,
	.boot_params	= PHYS_OFFSET + 0x00000100,
	.fixup		= realview_pb1176_fixup,
	.map_io		= realview_pb1176_map_io,
	.init_irq	= gic_init_irq,
	.timer		= &realview_pb1176_timer,
	.init_machine	= realview_pb1176_init,
MACHINE_END
gpl-2.0
TeamRefuse/android_kernel_samsung_dempsey
drivers/ata/sata_mv.c
713
122988
/* * sata_mv.c - Marvell SATA support * * Copyright 2008-2009: Marvell Corporation, all rights reserved. * Copyright 2005: EMC Corporation, all rights reserved. * Copyright 2005 Red Hat, Inc. All rights reserved. * * Originally written by Brett Russ. * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. * * Please ALWAYS copy linux-ide@vger.kernel.org on emails. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * sata_mv TODO list: * * --> Develop a low-power-consumption strategy, and implement it. * * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. * * --> [Experiment, Marvell value added] Is it possible to use target * mode to cross-connect two Linux boxes with Marvell cards? If so, * creating LibATA target mode support would be very interesting. * * Target mode, for those without docs, is the ability to directly * connect two SATA ports. */ /* * 80x1-B2 errata PCI#11: * * Users of the 6041/6081 Rev.B2 chips (current is C0) * should be careful to insert those cards only onto PCI-X bus #0, * and only in device slots 0..7, not higher. The chips may not * work correctly otherwise (note: this is a pretty rare condition). 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mbus.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/libata.h> #define DRV_NAME "sata_mv" #define DRV_VERSION "1.28" /* * module options */ static int msi; #ifdef CONFIG_PCI module_param(msi, int, S_IRUGO); MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); #endif static int irq_coalescing_io_count; module_param(irq_coalescing_io_count, int, S_IRUGO); MODULE_PARM_DESC(irq_coalescing_io_count, "IRQ coalescing I/O count threshold (0..255)"); static int irq_coalescing_usecs; module_param(irq_coalescing_usecs, int, S_IRUGO); MODULE_PARM_DESC(irq_coalescing_usecs, "IRQ coalescing time threshold in usecs"); enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ MV_IO_BAR = 2, /* offset 0x18: IO space */ MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ /* For use with both IRQ coalescing methods ("all ports" or "per-HC" */ COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ MAX_COAL_IO_COUNT = 255, /* completed I/O count */ MV_PCI_REG_BASE = 0, /* * Per-chip ("all ports") interrupt coalescing feature. * This is only for GEN_II / GEN_IIE hardware. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 
*/ COAL_REG_BASE = 0x18000, IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08), ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc), IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0), /* * Registers for the (unused here) transaction coalescing feature: */ TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88), TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c), SATAHC0_REG_BASE = 0x20000, FLASH_CTL = 0x1046c, GPIO_PORT_CTL = 0x104f0, RESET_CFG = 0x180d8, MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, MV_MAX_Q_DEPTH = 32, MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, /* CRQB needs alignment on a 1KB boundary. Size == 1KB * CRPB needs alignment on a 256B boundary. Size == 256B * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B */ MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), MV_MAX_SG_CT = 256, MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ MV_PORT_HC_SHIFT = 2, MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ /* Host Flags */ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, CRQB_FLAG_READ = (1 << 0), CRQB_TAG_SHIFT = 1, CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ CRQB_CMD_ADDR_SHIFT = 8, CRQB_CMD_CS = (0x2 << 11), CRQB_CMD_LAST = (1 << 15), CRPB_FLAG_STATUS_SHIFT = 8, CRPB_IOID_SHIFT_6 = 5, /* 
CRPB Gen-II IO Id shift */ CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ EPRD_FLAG_END_OF_TBL = (1 << 31), /* PCI interface registers */ MV_PCI_COMMAND = 0xc00, MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */ MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ PCI_MAIN_CMD_STS = 0xd30, STOP_PCI_MASTER = (1 << 2), PCI_MASTER_EMPTY = (1 << 3), GLOB_SFT_RST = (1 << 4), MV_PCI_MODE = 0xd00, MV_PCI_MODE_MASK = 0x30, MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, MV_PCI_DISC_TIMER = 0xd04, MV_PCI_MSI_TRIGGER = 0xc38, MV_PCI_SERR_MASK = 0xc28, MV_PCI_XBAR_TMOUT = 0x1d04, MV_PCI_ERR_LOW_ADDRESS = 0x1d40, MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, MV_PCI_ERR_ATTRIBUTE = 0x1d48, MV_PCI_ERR_COMMAND = 0x1d50, PCI_IRQ_CAUSE = 0x1d58, PCI_IRQ_MASK = 0x1d5c, PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ PCIE_IRQ_CAUSE = 0x1900, PCIE_IRQ_MASK = 0x1910, PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ PCI_HC_MAIN_IRQ_CAUSE = 0x1d60, PCI_HC_MAIN_IRQ_MASK = 0x1d64, SOC_HC_MAIN_IRQ_CAUSE = 0x20020, SOC_HC_MAIN_IRQ_MASK = 0x20024, ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ PCI_ERR = (1 << 18), TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ GPIO_INT = (1 << 22), SELF_INT = (1 << 23), TWSI_INT = (1 << 24), HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ /* SATAHC registers */ HC_CFG = 0x00, 
HC_IRQ_CAUSE = 0x14, DMA_IRQ = (1 << 0), /* shift by port # */ HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ DEV_IRQ = (1 << 8), /* shift by port # */ /* * Per-HC (Host-Controller) interrupt coalescing feature. * This is present on all chip generations. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. */ HC_IRQ_COAL_IO_THRESHOLD = 0x000c, HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, SOC_LED_CTRL = 0x2c, SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ /* with dev activity LED */ /* Shadow block registers */ SHD_BLK = 0x100, SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ /* SATA registers */ SATA_STATUS = 0x300, /* ctrl, err regs follow status */ SATA_ACTIVE = 0x350, FIS_IRQ_CAUSE = 0x364, FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ LTMODE = 0x30c, /* requires read-after-write */ LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ PHY_MODE2 = 0x330, PHY_MODE3 = 0x310, PHY_MODE4 = 0x314, /* requires read-after-write */ PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ SATA_IFCTL = 0x344, SATA_TESTCTL = 0x348, SATA_IFSTAT = 0x34c, VENDOR_UNIQUE_FIS = 0x35c, FISCFG = 0x360, FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ PHY_MODE9_GEN2 = 0x398, PHY_MODE9_GEN1 = 0x39c, PHYCFG_OFS = 0x3a0, /* only in 65n devices */ MV5_PHY_MODE = 0x74, MV5_LTMODE = 0x30, MV5_PHY_CTL = 0x0C, SATA_IFCFG = 0x050, MV_M2_PREAMP_MASK = 0x7e0, /* Port registers */ EDMA_CFG = 0, EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ EDMA_CFG_RD_BRST_EXT = 
(1 << 11), /* read burst 512B */ EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ EDMA_ERR_IRQ_CAUSE = 0x8, EDMA_ERR_IRQ_MASK = 0xc, EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ EDMA_ERR_DEV = (1 << 2), /* device error */ EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ EDMA_ERR_OVERRUN_5 = (1 << 5), EDMA_ERR_UNDERRUN_5 = (1 << 6), 
EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | EDMA_ERR_LNK_CTRL_RX_1 | EDMA_ERR_LNK_CTRL_RX_3 | EDMA_ERR_LNK_CTRL_TX, EDMA_EH_FREEZE = EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON | EDMA_ERR_SERR | EDMA_ERR_SELF_DIS | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 | EDMA_ERR_LNK_DATA_RX | EDMA_ERR_LNK_DATA_TX | EDMA_ERR_TRANS_PROTO, EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON | EDMA_ERR_OVERRUN_5 | EDMA_ERR_UNDERRUN_5 | EDMA_ERR_SELF_DIS_5 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | EDMA_ERR_IORDY, EDMA_REQ_Q_BASE_HI = 0x10, EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ EDMA_REQ_Q_OUT_PTR = 0x18, EDMA_REQ_Q_PTR_SHIFT = 5, EDMA_RSP_Q_BASE_HI = 0x1c, EDMA_RSP_Q_IN_PTR = 0x20, EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ EDMA_RSP_Q_PTR_SHIFT = 3, EDMA_CMD = 0x28, /* EDMA command register */ EDMA_EN = (1 << 0), /* enable EDMA */ EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ EDMA_STATUS = 0x30, /* EDMA engine status */ EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ EDMA_IORDY_TMOUT = 0x34, EDMA_ARB_CFG = 0x38, EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */ EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */ BMDMA_CMD = 0x224, /* bmdma command register */ BMDMA_STATUS = 0x228, /* bmdma status register */ BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */ BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */ /* Host private flags (hp_flags) */ MV_HP_FLAG_MSI = (1 << 0), MV_HP_ERRATA_50XXB0 = (1 << 1), MV_HP_ERRATA_50XXB2 = (1 << 2), MV_HP_ERRATA_60X1B2 = (1 << 3), MV_HP_ERRATA_60X1C0 = (1 << 4), MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ MV_HP_PCIE 
= (1 << 9), /* PCIe bus/regs: 7042 */ MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */ }; #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) enum { /* DMA boundary 0xffff is required by the s/g splitting * we need on /length/ in mv_fill-sg(). */ MV_DMA_BOUNDARY = 0xffffU, /* mask of register bits containing lower 32 bits * of EDMA request queue DMA address */ EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, /* ditto, for response queue */ EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, }; enum chip_type { chip_504x, chip_508x, chip_5080, chip_604x, chip_608x, chip_6042, chip_7042, chip_soc, }; /* Command ReQuest Block: 32B */ struct mv_crqb { __le32 sg_addr; __le32 sg_addr_hi; __le16 ctrl_flags; __le16 ata_cmd[11]; }; struct mv_crqb_iie { __le32 addr; __le32 addr_hi; __le32 flags; __le32 len; __le32 ata_cmd[4]; }; /* Command ResPonse Block: 8B */ struct mv_crpb { __le16 id; __le16 flags; __le32 tmstmp; }; /* EDMA Physical Region Descriptor (ePRD); A.K.A. 
   SG */
/* hardware scatter/gather table entry (see mv_fill_sg()) */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

/* per-port state: DMA queue memory, s/g tables, flags, cached regs */
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

/* per-host state: chip flags, irq mask shadow, mmio bases, DMA pools */
struct mv_host_priv {
	u32			hp_flags;
	unsigned int 		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;
#if defined(CONFIG_HAVE_CLK)
	struct clk		*clk;
#endif
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

/* per-chip-family hardware procedures (see mv5xxx_ops etc. below) */
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

/* forward declarations for the libata callbacks and helpers below */
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void
__iomem *mmio, unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/*
 * .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

/* Gen-I (50xx) port operations: no NCQ, no PMP, no bmdma */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Gen-II (60xx) port operations: adds NCQ, PMP resets, bmdma hooks */
static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.dev_config		= mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Gen-IIE (6042/7042/soc): like mv6 but with the IIE CRQB format */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

/* per-variant capabilities; indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/* NOTE(review): no .read_preamp here, unlike the other hw_ops tables —
 * presumably the 65nm SoC does not need it; confirm against chip docs. */
static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata		= mv_soc_65n_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

/* write a register, then read it back to flush PCI posted writes */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

/* which host controller (0 or 1) owns this port number */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

/* port index within its host controller (0..3) */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}

/* mmio base of host controller 'hc' register block */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

/* mmio base of the host controller that owns 'port' */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

/* mmio base of the per-port register block */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

/* Gen-I PHY register base for 'port' */
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

/* 2 host controllers on dual-HC chips, otherwise 1 */
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *	mv_save_cached_regs - (re-)initialize cached port registers
 *	@ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 *	mv_write_cached_reg - write to a cached port register
 *	@addr: hardware address of the register
 *	@old: pointer to cached value of the register
 *	@new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr);	/* unaffected by the errata */
	}
}

/* program the EDMA request/response queue base and in/out pointers */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

/* update the shadow copy of main_irq_mask; write hardware only on change */
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

/* replace this port's DONE/ERR irq-enable bits with 'port_bits' */
static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

/* clear all stale per-port irq-cause state, then enable 'port_irqs' */
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

/* configure completion-irq coalescing thresholds (count and usecs) */
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

/**
 *	mv_start_edma - Enable eDMA engine
 *	@ap: port on which to enable eDMA
 *	@port_mmio: port base address
 *	@pp: port private data
 *	@protocol: taskfile protocol of the command being started
 *
 *	(Re)configures and enables the eDMA engine if it is not
 *	already enabled for the wanted mode (NCQ vs non-NCQ),
 *	stopping it first on a mode mismatch.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* restart EDMA when switching between NCQ and non-NCQ */
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

/* poll EDMA_STATUS until the engine reports cache-empty and idle */
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	Returns 0 once the hardware confirms EDMA is off,
 *	-EIO if it never clears EDMA_EN within the poll window.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

/* drain and disable EDMA on 'ap', then drop back to non-EDMA config */
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
/* debug helper: hex-dump 'bytes' of mmio space, 4 words per line */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

/* debug helper: hex-dump the first 'bytes' of PCI config space */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

/* debug helper: dump PCI/HC/port registers; port < 0 means all ports */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ?
		num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

/* map an SCR register id to its mmio offset; 0xffffffffU if unsupported */
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

/* libata ->scr_read for Gen-II/IIE ports */
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

/* libata ->scr_write for Gen-II/IIE ports */
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidently
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}

/* libata ->qc_defer: decide whether a new command may be issued now */
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			/* non-NCQ command in NCQ mode: take the link
			 * exclusively and defer until the port drains */
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}

/* configure FIS-based switching via the cached FISCFG/LTMODE/HALTCOND regs */
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}

/* toggle GPIO_PORT_CTL bit 22 depending on NCQ use (errata workaround) */
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}

/**
 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *	@enable_bmdma: non-zero to enable basic DMA, zero to disable
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}

/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled.  Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 *
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 *
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 *
 * Note that this code assumes that an SOC never has more than one HC onboard.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl =
		readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

/* build and write the per-generation EDMA_CFG value for this port */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG);
}

/* release all per-port DMA-pool memory allocated in mv_port_start() */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
*/ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { if (tag == 0 || !IS_GEN_I(hpriv)) { pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, GFP_KERNEL, &pp->sg_tbl_dma[tag]); if (!pp->sg_tbl[tag]) goto out_port_free_dma_mem; } else { pp->sg_tbl[tag] = pp->sg_tbl[0]; pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; } } spin_lock_irqsave(ap->lock, flags); mv_save_cached_regs(ap); mv_edma_cfg(ap, 0, 0); spin_unlock_irqrestore(ap->lock, flags); return 0; out_port_free_dma_mem: mv_port_free_dma_mem(ap); return -ENOMEM; } /** * mv_port_stop - Port specific cleanup/stop routine. * @ap: ATA channel to manipulate * * Stop DMA, cleanup port memory. * * LOCKING: * This routine uses the host lock to protect the DMA stop. */ static void mv_port_stop(struct ata_port *ap) { unsigned long flags; spin_lock_irqsave(ap->lock, flags); mv_stop_edma(ap); mv_enable_port_irqs(ap, 0); spin_unlock_irqrestore(ap->lock, flags); mv_port_free_dma_mem(ap); } /** * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries * @qc: queued command whose SG list to source from * * Populate the SG list and mark the last entry. * * LOCKING: * Inherited from caller. 
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/* split each segment into <= 64KB ePRD chunks, never
		 * crossing a 64KB boundary of the starting offset */
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}

/*
 * mv_crqb_pack_cmd - pack one shadow-register write into a CRQB word.
 * @cmdw: destination CRQB command word
 * @data: register value to write
 * @addr: shadow register address code
 * @last: nonzero to mark this as the final word of the CRQB
 */
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
 * @ap: Port associated with this ATA transaction.
 *
 * We need this only for ATAPI bmdma transactions,
 * as otherwise we experience spurious interrupts
 * after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

/**
 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 * @qc: queued command to check for chipset/DMA compatibility.
 *
 * The bmdma engines cannot handle speculative data sizes
 * (bytecount under/over flow).  So only allow DMA for
 * data transfer commands with known data sizes.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}

/**
 * mv_bmdma_setup - Set up BMDMA transaction
 * @qc: queued command to prepare DMA for.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

/**
 * mv_bmdma_start - Start a BMDMA transaction
 * @qc: queued command to start DMA on.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	/* ATA_DMA_WR here means "host reads from device" (DMA write to RAM) */
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD);
}

/**
 * mv_bmdma_stop - Stop BMDMA transfer
 * @qc: queued command to stop DMA on.
 *
 * Clears the ATA_DMA_START flag in the bmdma control register
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_stop_ap(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD);
	if (cmd & ATA_DMA_START) {
		cmd &= ~ATA_DMA_START;
		writelfl(cmd, port_mmio + BMDMA_CMD);

		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
		ata_sff_dma_pause(ap);
	}
}

static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	mv_bmdma_stop_ap(qc->ap);
}

/**
 * mv_bmdma_status - Read BMDMA status
 * @ap: port for which to retrieve DMA status.
 *
 * Read and return equivalent of the sff BMDMA status register.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else if (reg & ATA_DMA_ERR)
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	else {
		/*
		 * Just because DMA_ACTIVE is 0 (DMA completed),
		 * this does _not_ mean the device is "done".
		 * So we should not yet be signalling ATA_DMA_INTR
		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
		 */
		mv_bmdma_stop_ap(ap);
		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
			status = 0;
		else
			status = ATA_DMA_INTR;
	}
	return status;
}

static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	/*
	 * Workaround for 88SX60x1 FEr SATA#24.
	 *
	 * Chip may corrupt WRITEs if multi_count >= 4kB.
	 * Note that READs are unaffected.
	 *
	 * It's not clear if this errata really means "4K bytes",
	 * or if it always happens for multi_count > 7
	 * regardless of device sector_size.
	 *
	 * So, for safety, any write with multi_count > 7
	 * gets converted here into a regular PIO write instead:
	 */
	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
		if (qc->dev->multi_count > 7) {
			switch (tf->command) {
			case ATA_CMD_WRITE_MULTI:
				tf->command = ATA_CMD_PIO_WRITE;
				break;
			case ATA_CMD_WRITE_MULTI_FUA_EXT:
				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
				/* fall through */
			case ATA_CMD_WRITE_MULTI_EXT:
				tf->command = ATA_CMD_PIO_WRITE_EXT;
				break;
			}
		}
	}
}

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf = &qc->tf;
	u16 flags = 0;
	unsigned in_index;

	switch (tf->protocol) {
	case ATA_PROT_DMA:
		if (tf->command == ATA_CMD_DSM)
			return;	/* DSM/TRIM goes via bmdma, not EDMA */
		/* fall-thru */
	case ATA_PROT_NCQ:
		break;	/* continue below */
	case ATA_PROT_PIO:
		mv_rw_multi_errata_sata24(qc);
		return;
	default:
		return;
	}

	/* Fill in command request block
	 */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf = &qc->tf;
	unsigned in_index;
	u32 flags = 0;

	if ((tf->protocol != ATA_PROT_DMA) &&
	    (tf->protocol != ATA_PROT_NCQ))
		return;
	if (tf->command == ATA_CMD_DSM)
		return;  /* use bmdma for this */

	/* Fill in Gen IIE command request block */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_sff_check_status - fetch device status, if valid
 * @ap: ATA port to fetch status from
 *
 * When using command issue via mv_qc_issue_fis(),
 * the initial ATA_BUSY state does not show up in the
 * ATA status (shadow) register.  This can confuse libata!
 *
 * So we have a hook here to fake ATA_BUSY for that situation,
 * until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 * The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		/* fake BUSY until the drive shows real activity */
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}

/**
 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 * @fis: fis to be sent
 * @nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_printk(ap, KERN_WARNING,
				"%s transmission error, ifstat=%08x\n",
				__func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}

/**
 * mv_qc_issue_fis - Issue a command directly as a FIS
 * @qc: queued command to start
 *
 * Note that the ATA shadow registers are not updated
 * after command issue, so the device will appear "READY"
 * if polled, even while it is BUSY processing the command.
 *
 * So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 * Note: we don't get updated shadow regs on *completion*
 * of non-data commands. So avoid sending them via this function,
 * as they will appear to have completed immediately.
 *
 * GEN_IIE has special registers that we could get the result tf from,
 * but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		/* fall through */
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_sff_queue_pio_task(ap, 0);
	return 0;
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		if (qc->tf.command == ATA_CMD_DSM) {
			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
				return AC_ERR_OTHER;
			break;  /* use bmdma for this */
		}
		/* fall thru */
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/* drop through */
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_bmdma_qc_issue(qc);
}

/*
 * mv_get_active_qc - return the active non-polled qc, if any.
 * @ap: port to query
 *
 * Returns NULL when NCQ is enabled (multiple commands may be active)
 * or when the active command is being polled.
 */
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		return qc;
	return NULL;
}

/*
 * mv_pmp_error_handler - EH entry point for ports behind a port multiplier.
 * @ap: port to handle errors for
 *
 * If delayed-EH was flagged (FBS+NCQ device error), run NCQ error
 * analysis on each failed PMP link first, then freeze the port and
 * fall into the generic sata_pmp_error_handler().
 */
static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}

/*
 * mv_get_err_pmp_map - read the bitmap of PMPs with device errors.
 * @ap: port to query
 *
 * The upper 16 bits of SATA_TESTCTL hold the per-PMP error bits.
 */
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL) >> 16;
}

/*
 * mv_pmp_eh_prep - mark failed PMP links for error handling.
 * @ap: port owning the PMP links
 * @pmp_map: bitmap of PMPs which saw a device error
 *
 * For each set bit, records a device error in that link's EH info,
 * requests a reset, and aborts the link.
 */
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}

/*
 * mv_req_q_empty - test whether the EDMA request queue has drained.
 * @ap: port to query
 *
 * Returns 1 when the hardware in/out pointers of the request queue
 * are equal (queue empty).
 */
static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}

/*
 * mv_handle_dev_err - triage a device error seen while FBS was active.
 * @ap: port which raised the error
 * @edma_err_cause: snapshot of EDMA_ERR_IRQ_CAUSE
 *
 * Returns 1 when the error was fully handled here (FBS+NCQ drain path),
 * 0 when the caller (mv_err_intr) must handle it.
 */
static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */
	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}

/*
 * mv_unexpected_intr - report a device interrupt we were not expecting.
 * @ap: port which interrupted
 * @edma_was_enabled: snapshot of the EDMA_EN flag at interrupt time
 *
 * Pushes a descriptive EH message and freezes the port for reset.
 */
static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action   |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 *
 * Most cases require a full reset of the chip's state machine,
 * which also performs a COMRESET.
 * Also, if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & FIS_IRQ_CAUSE_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}

/*
 * mv_process_crpb_response - complete one command from a CRPB entry.
 * @ap: port the response arrived on
 * @response: the CRPB response queue entry
 * @tag: command tag the response belongs to
 * @ncq_enabled: nonzero when NCQ is active on this port
 */
static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by
				 * mv_err_intr().  So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

/*
 * mv_process_crpb_entries - drain new CRPB responses for a port.
 * @ap: port to process
 * @pp: its port-private data
 *
 * Completes every response between the software out-pointer and the
 * hardware in-pointer, then writes the new out-pointer back.
 */
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses from since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

/*
 * mv_port_intr - per-port interrupt dispatch.
 * @ap: port which interrupted
 * @port_cause: DONE_IRQ/ERR_IRQ bits for this port from main_irq_cause
 */
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if something we call of our routines changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_bmdma_port_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @main_irq_cause: Main interrupt cause register for the chip.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	/* If asserted, clear the "all ports" IRQ coalescing bit */
	if (main_irq_cause & ALL_PORTS_COAL_DONE)
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			if (hc_cause & PORTS_0_3_COAL_DONE)
				ack_irqs = HC_COAL_IRQ;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}

/*
 * mv_pci_error - handle a PCI bus error reported via the main irq cause.
 * @host: host which saw the error
 * @mmio: chip register base
 *
 * Logs the PCI IRQ cause, dumps registers, clears the cause, then
 * fails the active command (if any) and freezes every online port.
 * Always returns 1 ("handled").
 */
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_offset);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_offset);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI:  block new interrupts while in here */
	if (using_msi)
		mv_write_main_irq_mask(0, hpriv);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

/*
 * mv5_scr_offset - map an SCR register id to its 50xx register offset.
 * @sc_reg_in: SCR_STATUS/SCR_ERROR/SCR_CONTROL
 *
 * Returns 0xffffffffU for unsupported registers.
 */
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

/* 50xx SCR read accessor; returns -EINVAL for unsupported registers */
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

/* 50xx SCR write accessor; returns -EINVAL for unsupported registers */
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

/* 50xx: reset the PCI bus, disabling expansion-ROM BAR first on non-early 5080 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

/* 50xx: restore the flash controller to its default state */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + FLASH_CTL);
}

/* 50xx: capture PHY pre-emphasis/amplitude signal settings for port @idx */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

/* 50xx: configure GPIO/ROM-BAR control for LED operation */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/*
	 * NOTE(review): this sets every bit EXCEPT bit 0, whereas
	 * mv5_reset_bus() sets only bit 0 of the same register.
	 * Looks like it may have been intended as |= (1 << 0) —
	 * confirm against the 50xx datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

/* 50xx: apply per-port PHY errata fixups and signal settings */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* 50xx: reset one port's channel and zero its EDMA registers */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio
+ EDMA_CFG); ZERO(0x004); /* timer */ ZERO(0x008); /* irq err cause */ ZERO(0x00c); /* irq err mask */ ZERO(0x010); /* rq bah */ ZERO(0x014); /* rq inp */ ZERO(0x018); /* rq outp */ ZERO(0x01c); /* respq bah */ ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO #define ZERO(reg) writel(0, hc_mmio + (reg)) static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int hc) { void __iomem *hc_mmio = mv_hc_base(mmio, hc); u32 tmp; ZERO(0x00c); ZERO(0x010); ZERO(0x014); ZERO(0x018); tmp = readl(hc_mmio + 0x20); tmp &= 0x1c1c1c1c; tmp |= 0x03030303; writel(tmp, hc_mmio + 0x20); } #undef ZERO static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { unsigned int hc, port; for (hc = 0; hc < n_hc; hc++) { for (port = 0; port < MV_PORTS_PER_HC; port++) mv5_reset_hc_port(hpriv, mmio, (hc * MV_PORTS_PER_HC) + port); mv5_reset_one_hc(hpriv, mmio, hc); } return 0; } #undef ZERO #define ZERO(reg) writel(0, mmio + (reg)) static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) { struct mv_host_priv *hpriv = host->private_data; u32 tmp; tmp = readl(mmio + MV_PCI_MODE); tmp &= 0xff00ffff; writel(tmp, mmio + MV_PCI_MODE); ZERO(MV_PCI_DISC_TIMER); ZERO(MV_PCI_MSI_TRIGGER); writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); ZERO(MV_PCI_SERR_MASK); ZERO(hpriv->irq_cause_offset); ZERO(hpriv->irq_mask_offset); ZERO(MV_PCI_ERR_LOW_ADDRESS); ZERO(MV_PCI_ERR_HIGH_ADDRESS); ZERO(MV_PCI_ERR_ATTRIBUTE); ZERO(MV_PCI_ERR_COMMAND); } #undef ZERO static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { u32 tmp; mv5_reset_flash(hpriv, mmio); tmp = readl(mmio + GPIO_PORT_CTL); tmp &= 0x3; tmp |= (1 << 5) | (1 << 6); writel(tmp, mmio + GPIO_PORT_CTL); } /** * mv6_reset_hc - Perform the 6xxx global soft reset * @mmio: base address of the HBA * * This routine only applies to 6xxx parts. 
* * LOCKING: * Inherited from caller. */ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { void __iomem *reg = mmio + PCI_MAIN_CMD_STS; int i, rc = 0; u32 t; /* Following procedure defined in PCI "main command and status * register" table. */ t = readl(reg); writel(t | STOP_PCI_MASTER, reg); for (i = 0; i < 1000; i++) { udelay(1); t = readl(reg); if (PCI_MASTER_EMPTY & t) break; } if (!(PCI_MASTER_EMPTY & t)) { printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); rc = 1; goto done; } /* set reset */ i = 5; do { writel(t | GLOB_SFT_RST, reg); t = readl(reg); udelay(1); } while (!(GLOB_SFT_RST & t) && (i-- > 0)); if (!(GLOB_SFT_RST & t)) { printk(KERN_ERR DRV_NAME ": can't set global reset\n"); rc = 1; goto done; } /* clear reset and *reenable the PCI master* (not mentioned in spec) */ i = 5; do { writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); t = readl(reg); udelay(1); } while ((GLOB_SFT_RST & t) && (i-- > 0)); if (GLOB_SFT_RST & t) { printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); rc = 1; } done: return rc; } static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio) { void __iomem *port_mmio; u32 tmp; tmp = readl(mmio + RESET_CFG); if ((tmp & (1 << 0)) == 0) { hpriv->signal[idx].amps = 0x7 << 8; hpriv->signal[idx].pre = 0x1 << 5; return; } port_mmio = mv_port_base(mmio, idx); tmp = readl(port_mmio + PHY_MODE2); hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ } static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { writel(0x00000060, mmio + GPIO_PORT_CTL); } static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); u32 hp_flags = hpriv->hp_flags; int fix_phy_mode2 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); int fix_phy_mode4 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); u32 m2, m3; if 
(fix_phy_mode2) { m2 = readl(port_mmio + PHY_MODE2); m2 &= ~(1 << 16); m2 |= (1 << 31); writel(m2, port_mmio + PHY_MODE2); udelay(200); m2 = readl(port_mmio + PHY_MODE2); m2 &= ~((1 << 16) | (1 << 31)); writel(m2, port_mmio + PHY_MODE2); udelay(200); } /* * Gen-II/IIe PHY_MODE3 errata RM#2: * Achieves better receiver noise performance than the h/w default: */ m3 = readl(port_mmio + PHY_MODE3); m3 = (m3 & 0x1f) | (0x5555601 << 5); /* Guideline 88F5182 (GL# SATA-S11) */ if (IS_SOC(hpriv)) m3 &= ~0x1c; if (fix_phy_mode4) { u32 m4 = readl(port_mmio + PHY_MODE4); /* * Enforce reserved-bit restrictions on GenIIe devices only. * For earlier chipsets, force only the internal config field * (workaround for errata FEr SATA#10 part 1). */ if (IS_GEN_IIE(hpriv)) m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; else m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; writel(m4, port_mmio + PHY_MODE4); } /* * Workaround for 60x1-B2 errata SATA#13: * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, * so we must always rewrite PHY_MODE3 after PHY_MODE4. * Or ensure we use writelfl() when writing PHY_MODE4. 
*/ writel(m3, port_mmio + PHY_MODE3); /* Revert values of pre-emphasis and signal amps to the saved ones */ m2 = readl(port_mmio + PHY_MODE2); m2 &= ~MV_M2_PREAMP_MASK; m2 |= hpriv->signal[port].amps; m2 |= hpriv->signal[port].pre; m2 &= ~(1 << 16); /* according to mvSata 3.6.1, some IIE values are fixed */ if (IS_GEN_IIE(hpriv)) { m2 &= ~0xC30FF01F; m2 |= 0x0000900F; } writel(m2, port_mmio + PHY_MODE2); } /* TODO: use the generic LED interface to configure the SATA Presence */ /* & Acitivy LEDs on the board */ static void mv_soc_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { return; } static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio) { void __iomem *port_mmio; u32 tmp; port_mmio = mv_port_base(mmio, idx); tmp = readl(port_mmio + PHY_MODE2); hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ } #undef ZERO #define ZERO(reg) writel(0, port_mmio + (reg)) static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); mv_reset_channel(hpriv, mmio, port); ZERO(0x028); /* command */ writel(0x101f, port_mmio + EDMA_CFG); ZERO(0x004); /* timer */ ZERO(0x008); /* irq err cause */ ZERO(0x00c); /* irq err mask */ ZERO(0x010); /* rq bah */ ZERO(0x014); /* rq inp */ ZERO(0x018); /* rq outp */ ZERO(0x01c); /* respq bah */ ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ writel(0x800, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO #define ZERO(reg) writel(0, hc_mmio + (reg)) static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio) { void __iomem *hc_mmio = mv_hc_base(mmio, 0); ZERO(0x00c); ZERO(0x010); ZERO(0x014); } #undef ZERO static int mv_soc_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { unsigned int port; for (port = 0; port < hpriv->n_ports; port++) mv_soc_reset_hc_port(hpriv, mmio, 
port); mv_soc_reset_one_hc(hpriv, mmio); return 0; } static void mv_soc_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { return; } static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) { return; } static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); u32 reg; reg = readl(port_mmio + PHY_MODE3); reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */ reg |= (0x1 << 27); reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */ reg |= (0x1 << 29); writel(reg, port_mmio + PHY_MODE3); reg = readl(port_mmio + PHY_MODE4); reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */ reg |= (0x1 << 16); writel(reg, port_mmio + PHY_MODE4); reg = readl(port_mmio + PHY_MODE9_GEN2); reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ reg |= 0x8; reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ writel(reg, port_mmio + PHY_MODE9_GEN2); reg = readl(port_mmio + PHY_MODE9_GEN1); reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ reg |= 0x8; reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ writel(reg, port_mmio + PHY_MODE9_GEN1); } /** * soc_is_65 - check if the soc is 65 nano device * * Detect the type of the SoC, this is done by reading the PHYCFG_OFS * register, this register should contain non-zero value and it exists only * in the 65 nano devices, when reading it from older devices we get 0. 
*/ static bool soc_is_65n(struct mv_host_priv *hpriv) { void __iomem *port0_mmio = mv_port_base(hpriv->base, 0); if (readl(port0_mmio + PHYCFG_OFS)) return true; return false; } static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) { u32 ifcfg = readl(port_mmio + SATA_IFCFG); ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ if (want_gen2i) ifcfg |= (1 << 7); /* enable gen2i speed */ writelfl(ifcfg, port_mmio + SATA_IFCFG); } static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no) { void __iomem *port_mmio = mv_port_base(mmio, port_no); /* * The datasheet warns against setting EDMA_RESET when EDMA is active * (but doesn't say what the problem might be). So we first try * to disable the EDMA engine before doing the EDMA_RESET operation. */ mv_stop_edma_engine(port_mmio); writelfl(EDMA_RESET, port_mmio + EDMA_CMD); if (!IS_GEN_I(hpriv)) { /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ mv_setup_ifcfg(port_mmio, 1); } /* * Strobing EDMA_RESET here causes a hard reset of the SATA transport, * link, and physical layers. It resets all SATA interface registers * (except for SATA_IFCFG), and issues a COMRESET to the dev. 
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD);

	/* apply chip-generation specific PHY fixups after the reset */
	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

/* Steer subsequent link operations at the given port-multiplier device */
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL);
		int old = reg & 0xf;	/* currently-selected PMP number */

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

/*
 * mv_hardreset - channel hard reset with errata FEr SATA#10 retry loop.
 *
 * Resets the channel, then retries the link hardreset until SStatus
 * reaches a settled value (0x0, 0x113 or 0x123).  On Gen-II/IIE parts,
 * after 5 attempts stuck at 0x121 the link is forced down to 1.5gb/s.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN |
			  MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra, &online, NULL);
		/* a live link is reported as -EAGAIN so EH follows up */
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ;	/* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}

/* EH freeze: quiesce EDMA and mask all of this port's interrupts */
static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

/* EH thaw: clear stale error/irq state, then re-enable ERR_IRQ only */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
*/ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) { void __iomem *serr, *shd_base = port_mmio + SHD_BLK; /* PIO related setup */ port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); port->error_addr = port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); port->status_addr = port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); /* special case: control/altstatus doesn't have ATA_REG_ address */ port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; /* Clear any currently outstanding port interrupt conditions */ serr = port_mmio + mv_scr_offset(SCR_ERROR); writelfl(readl(serr), serr); writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); /* unmask all non-transient EDMA error interrupts */ writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", readl(port_mmio + EDMA_CFG), readl(port_mmio + EDMA_ERR_IRQ_CAUSE), readl(port_mmio + EDMA_ERR_IRQ_MASK)); } static unsigned int mv_in_pcix_mode(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; u32 reg; if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) return 0; /* not PCI-X capable */ reg = readl(mmio + MV_PCI_MODE); if ((reg & MV_PCI_MODE_MASK) == 0) return 0; /* conventional PCI mode */ return 1; /* chip is in PCI-X mode */ } static int mv_pci_cut_through_okay(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; u32 reg; if (!mv_in_pcix_mode(host)) { reg = readl(mmio + MV_PCI_COMMAND); if (reg & MV_PCI_COMMAND_MRDTRIG) return 0; /* not okay */ } return 1; /* okay */ } static void 
mv_60x1b2_errata_pci7(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; /* workaround for 60x1-B2 errata PCI#7 */ if (mv_in_pcix_mode(host)) { u32 reg = readl(mmio + MV_PCI_COMMAND); writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); } } static int mv_chip_id(struct ata_host *host, unsigned int board_idx) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv = host->private_data; u32 hp_flags = hpriv->hp_flags; switch (board_idx) { case chip_5080: hpriv->ops = &mv5xxx_ops; hp_flags |= MV_HP_GEN_I; switch (pdev->revision) { case 0x1: hp_flags |= MV_HP_ERRATA_50XXB0; break; case 0x3: hp_flags |= MV_HP_ERRATA_50XXB2; break; default: dev_printk(KERN_WARNING, &pdev->dev, "Applying 50XXB2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } break; case chip_504x: case chip_508x: hpriv->ops = &mv5xxx_ops; hp_flags |= MV_HP_GEN_I; switch (pdev->revision) { case 0x0: hp_flags |= MV_HP_ERRATA_50XXB0; break; case 0x3: hp_flags |= MV_HP_ERRATA_50XXB2; break; default: dev_printk(KERN_WARNING, &pdev->dev, "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } break; case chip_604x: case chip_608x: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_II; switch (pdev->revision) { case 0x7: mv_60x1b2_errata_pci7(host); hp_flags |= MV_HP_ERRATA_60X1B2; break; case 0x9: hp_flags |= MV_HP_ERRATA_60X1C0; break; default: dev_printk(KERN_WARNING, &pdev->dev, "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1B2; break; } break; case chip_7042: hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; if (pdev->vendor == PCI_VENDOR_ID_TTI && (pdev->device == 0x2300 || pdev->device == 0x2310)) { /* * Highpoint RocketRAID PCIe 23xx series cards: * * Unconfigured drives are treated as "Legacy" * by the BIOS, and it overwrites sector 8 with * a "Lgcy" metadata block prior to Linux boot. 
* * Configured drives (RAID or JBOD) leave sector 8 * alone, but instead overwrite a high numbered * sector for the RAID metadata. This sector can * be determined exactly, by truncating the physical * drive capacity to a nice even GB value. * * RAID metadata is at: (dev->n_sectors & ~0xfffff) * * Warn the user, lest they think we're just buggy. */ printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" " BIOS CORRUPTS DATA on all attached drives," " regardless of if/how they are configured." " BEWARE!\n"); printk(KERN_WARNING DRV_NAME ": For data safety, do not" " use sectors 8-9 on \"Legacy\" drives," " and avoid the final two gigabytes on" " all RocketRAID BIOS initialized drives.\n"); } /* drop through */ case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) hp_flags |= MV_HP_CUT_THROUGH; switch (pdev->revision) { case 0x2: /* Rev.B0: the first/only public release */ hp_flags |= MV_HP_ERRATA_60X1C0; break; default: dev_printk(KERN_WARNING, &pdev->dev, "Applying 60X1C0 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1C0; break; } break; case chip_soc: if (soc_is_65n(hpriv)) hpriv->ops = &mv_soc_65n_ops; else hpriv->ops = &mv_soc_ops; hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | MV_HP_ERRATA_60X1C0; break; default: dev_printk(KERN_ERR, host->dev, "BUG: invalid board index %u\n", board_idx); return 1; } hpriv->hp_flags = hp_flags; if (hp_flags & MV_HP_PCIE) { hpriv->irq_cause_offset = PCIE_IRQ_CAUSE; hpriv->irq_mask_offset = PCIE_IRQ_MASK; hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; } else { hpriv->irq_cause_offset = PCI_IRQ_CAUSE; hpriv->irq_mask_offset = PCI_IRQ_MASK; hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; } return 0; } /** * mv_init_host - Perform some early initialization of the host. * @host: ATA host to initialize * * If possible, do an early global reset of the host. Then do * our port init and clear/unmask all/relevant host interrupts. 
* * LOCKING: * Inherited from caller. */ static int mv_init_host(struct ata_host *host) { int rc = 0, n_hc, port, hc; struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; rc = mv_chip_id(host, hpriv->board_idx); if (rc) goto done; if (IS_SOC(hpriv)) { hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE; hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK; } else { hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE; hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK; } /* initialize shadow irq mask with register's value */ hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); /* global interrupt mask: 0 == mask everything */ mv_set_main_irq_mask(host, ~0, 0); n_hc = mv_get_hc_count(host->ports[0]->flags); for (port = 0; port < host->n_ports; port++) if (hpriv->ops->read_preamp) hpriv->ops->read_preamp(hpriv, port, mmio); rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); if (rc) goto done; hpriv->ops->reset_flash(hpriv, mmio); hpriv->ops->reset_bus(host, mmio); hpriv->ops->enable_leds(hpriv, mmio); for (port = 0; port < host->n_ports; port++) { struct ata_port *ap = host->ports[port]; void __iomem *port_mmio = mv_port_base(mmio, port); mv_port_init(&ap->ioaddr, port_mmio); } for (hc = 0; hc < n_hc; hc++) { void __iomem *hc_mmio = mv_hc_base(mmio, hc); VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " "(before clear)=0x%08x\n", hc, readl(hc_mmio + HC_CFG), readl(hc_mmio + HC_IRQ_CAUSE)); /* Clear any currently outstanding hc interrupt conditions */ writelfl(0, hc_mmio + HC_IRQ_CAUSE); } if (!IS_SOC(hpriv)) { /* Clear any currently outstanding host interrupt conditions */ writelfl(0, mmio + hpriv->irq_cause_offset); /* and unmask interrupt generation for host regs */ writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset); } /* * enable only global host interrupts for now. * The per-port interrupts get done later as ports are set up. 
*/ mv_set_main_irq_mask(host, 0, PCI_ERR); mv_set_irq_coalescing(host, irq_coalescing_io_count, irq_coalescing_usecs); done: return rc; } static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) { hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0); if (!hpriv->crqb_pool) return -ENOMEM; hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, MV_CRPB_Q_SZ, 0); if (!hpriv->crpb_pool) return -ENOMEM; hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, MV_SG_TBL_SZ, 0); if (!hpriv->sg_tbl_pool) return -ENOMEM; return 0; } static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, struct mbus_dram_target_info *dram) { int i; for (i = 0; i < 4; i++) { writel(0, hpriv->base + WINDOW_CTRL(i)); writel(0, hpriv->base + WINDOW_BASE(i)); } for (i = 0; i < dram->num_cs; i++) { struct mbus_dram_window *cs = dram->cs + i; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, hpriv->base + WINDOW_CTRL(i)); writel(cs->base, hpriv->base + WINDOW_BASE(i)); } } /** * mv_platform_probe - handle a positive probe of an soc Marvell * host * @pdev: platform device found * * LOCKING: * Inherited from caller. */ static int mv_platform_probe(struct platform_device *pdev) { static int printed_version; const struct mv_sata_platform_data *mv_platform_data; const struct ata_port_info *ppi[] = { &mv_port_info[chip_soc], NULL }; struct ata_host *host; struct mv_host_priv *hpriv; struct resource *res; int n_ports, rc; if (!printed_version++) dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); /* * Simple resource validation .. 
*/ if (unlikely(pdev->num_resources != 2)) { dev_err(&pdev->dev, "invalid number of resources\n"); return -EINVAL; } /* * Get the register base first */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) return -EINVAL; /* allocate host */ mv_platform_data = pdev->dev.platform_data; n_ports = mv_platform_data->n_ports; host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; host->private_data = hpriv; hpriv->n_ports = n_ports; hpriv->board_idx = chip_soc; host->iomap = NULL; hpriv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); hpriv->base -= SATAHC0_REG_BASE; #if defined(CONFIG_HAVE_CLK) hpriv->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(hpriv->clk)) dev_notice(&pdev->dev, "cannot get clkdev\n"); else clk_enable(hpriv->clk); #endif /* * (Re-)program MBUS remapping windows if we are asked to. */ if (mv_platform_data->dram != NULL) mv_conf_mbus_windows(hpriv, mv_platform_data->dram); rc = mv_create_dma_pools(hpriv, &pdev->dev); if (rc) goto err; /* initialize adapter */ rc = mv_init_host(host); if (rc) goto err; dev_printk(KERN_INFO, &pdev->dev, "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, host->n_ports); return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, IRQF_SHARED, &mv6_sht); err: #if defined(CONFIG_HAVE_CLK) if (!IS_ERR(hpriv->clk)) { clk_disable(hpriv->clk); clk_put(hpriv->clk); } #endif return rc; } /* * * mv_platform_remove - unplug a platform interface * @pdev: platform device * * A platform bus SATA device has been unplugged. Perform the needed * cleanup. Also called on module unload for any active devices. 
*/ static int __devexit mv_platform_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ata_host *host = dev_get_drvdata(dev); #if defined(CONFIG_HAVE_CLK) struct mv_host_priv *hpriv = host->private_data; #endif ata_host_detach(host); #if defined(CONFIG_HAVE_CLK) if (!IS_ERR(hpriv->clk)) { clk_disable(hpriv->clk); clk_put(hpriv->clk); } #endif return 0; } #ifdef CONFIG_PM static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state) { struct ata_host *host = dev_get_drvdata(&pdev->dev); if (host) return ata_host_suspend(host, state); else return 0; } static int mv_platform_resume(struct platform_device *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int ret; if (host) { struct mv_host_priv *hpriv = host->private_data; const struct mv_sata_platform_data *mv_platform_data = \ pdev->dev.platform_data; /* * (Re-)program MBUS remapping windows if we are asked to. */ if (mv_platform_data->dram != NULL) mv_conf_mbus_windows(hpriv, mv_platform_data->dram); /* initialize adapter */ ret = mv_init_host(host); if (ret) { printk(KERN_ERR DRV_NAME ": Error during HW init\n"); return ret; } ata_host_resume(host); } return 0; } #else #define mv_platform_suspend NULL #define mv_platform_resume NULL #endif static struct platform_driver mv_platform_driver = { .probe = mv_platform_probe, .remove = __devexit_p(mv_platform_remove), .suspend = mv_platform_suspend, .resume = mv_platform_resume, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, }; #ifdef CONFIG_PCI static int mv_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); #ifdef CONFIG_PM static int mv_pci_device_resume(struct pci_dev *pdev); #endif static struct pci_driver mv_pci_driver = { .name = DRV_NAME, .id_table = mv_pci_tbl, .probe = mv_pci_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = mv_pci_device_resume, #endif }; /* move to PCI layer or libata core? 
*/ static int pci_go_64(struct pci_dev *pdev) { int rc; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "64-bit DMA enable failed\n"); return rc; } } } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_printk(KERN_ERR, &pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } } return rc; } /** * mv_print_info - Dump key info to kernel log for perusal. * @host: ATA host to print info about * * FIXME: complete this. * * LOCKING: * Inherited from caller. */ static void mv_print_info(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv = host->private_data; u8 scc; const char *scc_s, *gen; /* Use this to determine the HW stepping of the chip so we know * what errata to workaround */ pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); if (scc == 0) scc_s = "SCSI"; else if (scc == 0x01) scc_s = "RAID"; else scc_s = "?"; if (IS_GEN_I(hpriv)) gen = "I"; else if (IS_GEN_II(hpriv)) gen = "II"; else if (IS_GEN_IIE(hpriv)) gen = "IIE"; else gen = "?"; dev_printk(KERN_INFO, &pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n", gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); } /** * mv_pci_init_one - handle a positive probe of a PCI Marvell host * @pdev: PCI device found * @ent: PCI device ID entry for the matched host * * LOCKING: * Inherited from caller. 
*/ static int mv_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; unsigned int board_idx = (unsigned int)ent->driver_data; const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; struct ata_host *host; struct mv_host_priv *hpriv; int n_ports, port, rc; if (!printed_version++) dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); /* allocate host */ n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; host->private_data = hpriv; hpriv->n_ports = n_ports; hpriv->board_idx = board_idx; /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) return rc; rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); hpriv->base = host->iomap[MV_PRIMARY_BAR]; rc = pci_go_64(pdev); if (rc) return rc; rc = mv_create_dma_pools(hpriv, &pdev->dev); if (rc) return rc; for (port = 0; port < host->n_ports; port++) { struct ata_port *ap = host->ports[port]; void __iomem *port_mmio = mv_port_base(hpriv->base, port); unsigned int offset = port_mmio - hpriv->base; ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); } /* initialize adapter */ rc = mv_init_host(host); if (rc) return rc; /* Enable message-switched interrupts, if requested */ if (msi && pci_enable_msi(pdev) == 0) hpriv->hp_flags |= MV_HP_FLAG_MSI; mv_dump_pci_cfg(pdev, 0x68); mv_print_info(host); pci_set_master(pdev); pci_try_set_mwi(pdev); return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, IS_GEN_I(hpriv) ? 
&mv5_sht : &mv6_sht); } #ifdef CONFIG_PM static int mv_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = dev_get_drvdata(&pdev->dev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; /* initialize adapter */ rc = mv_init_host(host); if (rc) return rc; ata_host_resume(host); return 0; } #endif #endif static int mv_platform_probe(struct platform_device *pdev); static int __devexit mv_platform_remove(struct platform_device *pdev); static int __init mv_init(void) { int rc = -ENODEV; #ifdef CONFIG_PCI rc = pci_register_driver(&mv_pci_driver); if (rc < 0) return rc; #endif rc = platform_driver_register(&mv_platform_driver); #ifdef CONFIG_PCI if (rc < 0) pci_unregister_driver(&mv_pci_driver); #endif return rc; } static void __exit mv_exit(void) { #ifdef CONFIG_PCI pci_unregister_driver(&mv_pci_driver); #endif platform_driver_unregister(&mv_platform_driver); } MODULE_AUTHOR("Brett Russ"); MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, mv_pci_tbl); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:" DRV_NAME); module_init(mv_init); module_exit(mv_exit);
gpl-2.0
dl12345/kernel_sony_kitakami
net/sctp/ulpevent.c
1481
31338
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * * These functions manipulate an sctp event. The struct ulpevent is used * to carry notifications and data to the ULP (sockets). * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * Jon Grimm <jgrimm@us.ibm.com> * La Monte H.P. Yarroll <piggy@acm.org> * Ardelle Fan <ardelle.fan@intel.com> * Sridhar Samudrala <sri@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. 
*/ #include <linux/slab.h> #include <linux/types.h> #include <linux/skbuff.h> #include <net/sctp/structs.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, struct sctp_association *asoc); static void sctp_ulpevent_release_data(struct sctp_ulpevent *event); static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event); /* Initialize an ULP event from an given skb. */ SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags, unsigned int len) { memset(event, 0, sizeof(struct sctp_ulpevent)); event->msg_flags = msg_flags; event->rmem_len = len; } /* Create a new sctp_ulpevent. */ SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, gfp_t gfp) { struct sctp_ulpevent *event; struct sk_buff *skb; skb = alloc_skb(size, gfp); if (!skb) goto fail; event = sctp_skb2event(skb); sctp_ulpevent_init(event, msg_flags, skb->truesize); return event; fail: return NULL; } /* Is this a MSG_NOTIFICATION? */ int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event) { return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION); } /* Hold the association in case the msg_name needs read out of * the association. */ static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event, const struct sctp_association *asoc) { struct sk_buff *skb; /* Cast away the const, as we are just wanting to * bump the reference count. */ sctp_association_hold((struct sctp_association *)asoc); skb = sctp_event2skb(event); event->asoc = (struct sctp_association *)asoc; atomic_add(event->rmem_len, &event->asoc->rmem_alloc); sctp_skb_set_owner_r(skb, asoc->base.sk); } /* A simple destructor to give up the reference to the association. 
*/ static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) { struct sctp_association *asoc = event->asoc; atomic_sub(event->rmem_len, &asoc->rmem_alloc); sctp_association_put(asoc); } /* Create and initialize an SCTP_ASSOC_CHANGE event. * * 5.3.1.1 SCTP_ASSOC_CHANGE * * Communication notifications inform the ULP that an SCTP association * has either begun or ended. The identifier for a new association is * provided by this notification. * * Note: There is no field checking here. If a field is unused it will be * zero'd out. */ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( const struct sctp_association *asoc, __u16 flags, __u16 state, __u16 error, __u16 outbound, __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_assoc_change *sac; struct sk_buff *skb; /* If the lower layer passed in the chunk, it will be * an ABORT, so we need to include it in the sac_info. */ if (chunk) { /* Copy the chunk data to a new skb and reserve enough * head room to use as notification. */ skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_assoc_change), 0, gfp); if (!skb) goto fail; /* Embed the event fields inside the cloned skb. */ event = sctp_skb2event(skb); sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); /* Include the notification structure */ sac = (struct sctp_assoc_change *) skb_push(skb, sizeof(struct sctp_assoc_change)); /* Trim the buffer to the right length. */ skb_trim(skb, sizeof(struct sctp_assoc_change) + ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t)); } else { event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), MSG_NOTIFICATION, gfp); if (!event) goto fail; skb = sctp_event2skb(event); sac = (struct sctp_assoc_change *) skb_put(skb, sizeof(struct sctp_assoc_change)); } /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * sac_type: * It should be SCTP_ASSOC_CHANGE. 
*/ sac->sac_type = SCTP_ASSOC_CHANGE; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * sac_state: 32 bits (signed integer) * This field holds one of a number of values that communicate the * event that happened to the association. */ sac->sac_state = state; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * sac_flags: 16 bits (unsigned integer) * Currently unused. */ sac->sac_flags = 0; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * sac_length: sizeof (__u32) * This field is the total length of the notification data, including * the notification header. */ sac->sac_length = skb->len; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * sac_error: 32 bits (signed integer) * * If the state was reached due to a error condition (e.g. * COMMUNICATION_LOST) any relevant error information is available in * this field. This corresponds to the protocol error codes defined in * [SCTP]. */ sac->sac_error = error; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * sac_outbound_streams: 16 bits (unsigned integer) * sac_inbound_streams: 16 bits (unsigned integer) * * The maximum number of streams allowed in each direction are * available in sac_outbound_streams and sac_inbound streams. */ sac->sac_outbound_streams = outbound; sac->sac_inbound_streams = inbound; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * sac_assoc_id: sizeof (sctp_assoc_t) * * The association id field, holds the identifier for the association. * All notifications for a given association have the same association * identifier. For TCP style socket, this field is ignored. */ sctp_ulpevent_set_owner(event, asoc); sac->sac_assoc_id = sctp_assoc2id(asoc); return event; fail: return NULL; } /* Create and initialize an SCTP_PEER_ADDR_CHANGE event. * * Socket Extensions for SCTP - draft-01 * 5.3.1.2 SCTP_PEER_ADDR_CHANGE * * When a destination address on a multi-homed peer encounters a change * an interface details event is sent. 
*/ struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( const struct sctp_association *asoc, const struct sockaddr_storage *aaddr, int flags, int state, int error, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_paddr_change *spc; struct sk_buff *skb; event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change), MSG_NOTIFICATION, gfp); if (!event) goto fail; skb = sctp_event2skb(event); spc = (struct sctp_paddr_change *) skb_put(skb, sizeof(struct sctp_paddr_change)); /* Sockets API Extensions for SCTP * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE * * spc_type: * * It should be SCTP_PEER_ADDR_CHANGE. */ spc->spc_type = SCTP_PEER_ADDR_CHANGE; /* Sockets API Extensions for SCTP * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE * * spc_length: sizeof (__u32) * * This field is the total length of the notification data, including * the notification header. */ spc->spc_length = sizeof(struct sctp_paddr_change); /* Sockets API Extensions for SCTP * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE * * spc_flags: 16 bits (unsigned integer) * Currently unused. */ spc->spc_flags = 0; /* Sockets API Extensions for SCTP * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE * * spc_state: 32 bits (signed integer) * * This field holds one of a number of values that communicate the * event that happened to the address. */ spc->spc_state = state; /* Sockets API Extensions for SCTP * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE * * spc_error: 32 bits (signed integer) * * If the state was reached due to any error condition (e.g. * ADDRESS_UNREACHABLE) any relevant error information is available in * this field. */ spc->spc_error = error; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE * * spc_assoc_id: sizeof (sctp_assoc_t) * * The association id field, holds the identifier for the association. * All notifications for a given association have the same association * identifier. For TCP style socket, this field is ignored. 
*/ sctp_ulpevent_set_owner(event, asoc); spc->spc_assoc_id = sctp_assoc2id(asoc); /* Sockets API Extensions for SCTP * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE * * spc_aaddr: sizeof (struct sockaddr_storage) * * The affected address field, holds the remote peer's address that is * encountering the change of state. */ memcpy(&spc->spc_aaddr, aaddr, sizeof(struct sockaddr_storage)); /* Map ipv4 address into v4-mapped-on-v6 address. */ sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_v4map( sctp_sk(asoc->base.sk), (union sctp_addr *)&spc->spc_aaddr); return event; fail: return NULL; } /* Create and initialize an SCTP_REMOTE_ERROR notification. * * Note: This assumes that the chunk->skb->data already points to the * operation error payload. * * Socket Extensions for SCTP - draft-01 * 5.3.1.3 SCTP_REMOTE_ERROR * * A remote peer may send an Operational Error message to its peer. * This message indicates a variety of error conditions on an * association. The entire error TLV as it appears on the wire is * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP * specification [SCTP] and any extensions for a list of possible * error formats. */ struct sctp_ulpevent *sctp_ulpevent_make_remote_error( const struct sctp_association *asoc, struct sctp_chunk *chunk, __u16 flags, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_remote_error *sre; struct sk_buff *skb; sctp_errhdr_t *ch; __be16 cause; int elen; ch = (sctp_errhdr_t *)(chunk->skb->data); cause = ch->cause; elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t); /* Pull off the ERROR header. */ skb_pull(chunk->skb, sizeof(sctp_errhdr_t)); /* Copy the skb to a new skb with room for us to prepend * notification with. */ skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error), 0, gfp); /* Pull off the rest of the cause TLV from the chunk. */ skb_pull(chunk->skb, elen); if (!skb) goto fail; /* Embed the event fields inside the cloned skb. 
*/ event = sctp_skb2event(skb); sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); sre = (struct sctp_remote_error *) skb_push(skb, sizeof(struct sctp_remote_error)); /* Trim the buffer to the right length. */ skb_trim(skb, sizeof(struct sctp_remote_error) + elen); /* Socket Extensions for SCTP * 5.3.1.3 SCTP_REMOTE_ERROR * * sre_type: * It should be SCTP_REMOTE_ERROR. */ sre->sre_type = SCTP_REMOTE_ERROR; /* * Socket Extensions for SCTP * 5.3.1.3 SCTP_REMOTE_ERROR * * sre_flags: 16 bits (unsigned integer) * Currently unused. */ sre->sre_flags = 0; /* Socket Extensions for SCTP * 5.3.1.3 SCTP_REMOTE_ERROR * * sre_length: sizeof (__u32) * * This field is the total length of the notification data, * including the notification header. */ sre->sre_length = skb->len; /* Socket Extensions for SCTP * 5.3.1.3 SCTP_REMOTE_ERROR * * sre_error: 16 bits (unsigned integer) * This value represents one of the Operational Error causes defined in * the SCTP specification, in network byte order. */ sre->sre_error = cause; /* Socket Extensions for SCTP * 5.3.1.3 SCTP_REMOTE_ERROR * * sre_assoc_id: sizeof (sctp_assoc_t) * * The association id field, holds the identifier for the association. * All notifications for a given association have the same association * identifier. For TCP style socket, this field is ignored. */ sctp_ulpevent_set_owner(event, asoc); sre->sre_assoc_id = sctp_assoc2id(asoc); return event; fail: return NULL; } /* Create and initialize a SCTP_SEND_FAILED notification. * * Socket Extensions for SCTP - draft-01 * 5.3.1.4 SCTP_SEND_FAILED */ struct sctp_ulpevent *sctp_ulpevent_make_send_failed( const struct sctp_association *asoc, struct sctp_chunk *chunk, __u16 flags, __u32 error, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_send_failed *ssf; struct sk_buff *skb; /* Pull off any padding. */ int len = ntohs(chunk->chunk_hdr->length); /* Make skb with more room so we can prepend notification. 
*/ skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_send_failed), /* headroom */ 0, /* tailroom */ gfp); if (!skb) goto fail; /* Pull off the common chunk header and DATA header. */ skb_pull(skb, sizeof(struct sctp_data_chunk)); len -= sizeof(struct sctp_data_chunk); /* Embed the event fields inside the cloned skb. */ event = sctp_skb2event(skb); sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); ssf = (struct sctp_send_failed *) skb_push(skb, sizeof(struct sctp_send_failed)); /* Socket Extensions for SCTP * 5.3.1.4 SCTP_SEND_FAILED * * ssf_type: * It should be SCTP_SEND_FAILED. */ ssf->ssf_type = SCTP_SEND_FAILED; /* Socket Extensions for SCTP * 5.3.1.4 SCTP_SEND_FAILED * * ssf_flags: 16 bits (unsigned integer) * The flag value will take one of the following values * * SCTP_DATA_UNSENT - Indicates that the data was never put on * the wire. * * SCTP_DATA_SENT - Indicates that the data was put on the wire. * Note that this does not necessarily mean that the * data was (or was not) successfully delivered. */ ssf->ssf_flags = flags; /* Socket Extensions for SCTP * 5.3.1.4 SCTP_SEND_FAILED * * ssf_length: sizeof (__u32) * This field is the total length of the notification data, including * the notification header. */ ssf->ssf_length = sizeof(struct sctp_send_failed) + len; skb_trim(skb, ssf->ssf_length); /* Socket Extensions for SCTP * 5.3.1.4 SCTP_SEND_FAILED * * ssf_error: 16 bits (unsigned integer) * This value represents the reason why the send failed, and if set, * will be a SCTP protocol error code as defined in [SCTP] section * 3.3.10. */ ssf->ssf_error = error; /* Socket Extensions for SCTP * 5.3.1.4 SCTP_SEND_FAILED * * ssf_info: sizeof (struct sctp_sndrcvinfo) * The original send information associated with the undelivered * message. */ memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo)); /* Per TSVWG discussion with Randy. Allow the application to * reassemble a fragmented message. 
*/ ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; /* Socket Extensions for SCTP * 5.3.1.4 SCTP_SEND_FAILED * * ssf_assoc_id: sizeof (sctp_assoc_t) * The association id field, sf_assoc_id, holds the identifier for the * association. All notifications for a given association have the * same association identifier. For TCP style socket, this field is * ignored. */ sctp_ulpevent_set_owner(event, asoc); ssf->ssf_assoc_id = sctp_assoc2id(asoc); return event; fail: return NULL; } /* Create and initialize a SCTP_SHUTDOWN_EVENT notification. * * Socket Extensions for SCTP - draft-01 * 5.3.1.5 SCTP_SHUTDOWN_EVENT */ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( const struct sctp_association *asoc, __u16 flags, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_shutdown_event *sse; struct sk_buff *skb; event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event), MSG_NOTIFICATION, gfp); if (!event) goto fail; skb = sctp_event2skb(event); sse = (struct sctp_shutdown_event *) skb_put(skb, sizeof(struct sctp_shutdown_event)); /* Socket Extensions for SCTP * 5.3.1.5 SCTP_SHUTDOWN_EVENT * * sse_type * It should be SCTP_SHUTDOWN_EVENT */ sse->sse_type = SCTP_SHUTDOWN_EVENT; /* Socket Extensions for SCTP * 5.3.1.5 SCTP_SHUTDOWN_EVENT * * sse_flags: 16 bits (unsigned integer) * Currently unused. */ sse->sse_flags = 0; /* Socket Extensions for SCTP * 5.3.1.5 SCTP_SHUTDOWN_EVENT * * sse_length: sizeof (__u32) * This field is the total length of the notification data, including * the notification header. */ sse->sse_length = sizeof(struct sctp_shutdown_event); /* Socket Extensions for SCTP * 5.3.1.5 SCTP_SHUTDOWN_EVENT * * sse_assoc_id: sizeof (sctp_assoc_t) * The association id field, holds the identifier for the association. * All notifications for a given association have the same association * identifier. For TCP style socket, this field is ignored. 
*/ sctp_ulpevent_set_owner(event, asoc); sse->sse_assoc_id = sctp_assoc2id(asoc); return event; fail: return NULL; } /* Create and initialize a SCTP_ADAPTATION_INDICATION notification. * * Socket Extensions for SCTP * 5.3.1.6 SCTP_ADAPTATION_INDICATION */ struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication( const struct sctp_association *asoc, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_adaptation_event *sai; struct sk_buff *skb; event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event), MSG_NOTIFICATION, gfp); if (!event) goto fail; skb = sctp_event2skb(event); sai = (struct sctp_adaptation_event *) skb_put(skb, sizeof(struct sctp_adaptation_event)); sai->sai_type = SCTP_ADAPTATION_INDICATION; sai->sai_flags = 0; sai->sai_length = sizeof(struct sctp_adaptation_event); sai->sai_adaptation_ind = asoc->peer.adaptation_ind; sctp_ulpevent_set_owner(event, asoc); sai->sai_assoc_id = sctp_assoc2id(asoc); return event; fail: return NULL; } /* A message has been received. Package this message as a notification * to pass it to the upper layers. Go ahead and calculate the sndrcvinfo * even if filtered out later. * * Socket Extensions for SCTP * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) */ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_ulpevent *event = NULL; struct sk_buff *skb; size_t padding, len; int rx_count; /* * check to see if we need to make space for this * new skb, expand the rcvbuffer if needed, or drop * the frame */ if (asoc->ep->rcvbuf_policy) rx_count = atomic_read(&asoc->rmem_alloc); else rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); if (rx_count >= asoc->base.sk->sk_rcvbuf) { if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || (!sk_rmem_schedule(asoc->base.sk, chunk->skb, chunk->skb->truesize))) goto fail; } /* Clone the original skb, sharing the data. 
*/ skb = skb_clone(chunk->skb, gfp); if (!skb) goto fail; /* Now that all memory allocations for this chunk succeeded, we * can mark it as received so the tsn_map is updated correctly. */ if (sctp_tsnmap_mark(&asoc->peer.tsn_map, ntohl(chunk->subh.data_hdr->tsn), chunk->transport)) goto fail_mark; /* First calculate the padding, so we don't inadvertently * pass up the wrong length to the user. * * RFC 2960 - Section 3.2 Chunk Field Descriptions * * The total length of a chunk(including Type, Length and Value fields) * MUST be a multiple of 4 bytes. If the length of the chunk is not a * multiple of 4 bytes, the sender MUST pad the chunk with all zero * bytes and this padding is not included in the chunk length field. * The sender should never pad with more than 3 bytes. The receiver * MUST ignore the padding bytes. */ len = ntohs(chunk->chunk_hdr->length); padding = WORD_ROUND(len) - len; /* Fixup cloned skb with just this chunks data. */ skb_trim(skb, chunk->chunk_end - padding - skb->data); /* Embed the event fields inside the cloned skb. */ event = sctp_skb2event(skb); /* Initialize event with flags 0 and correct length * Since this is a clone of the original skb, only account for * the data of this chunk as other chunks will be accounted separately. */ sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); sctp_ulpevent_receive_data(event, asoc); event->stream = ntohs(chunk->subh.data_hdr->stream); event->ssn = ntohs(chunk->subh.data_hdr->ssn); event->ppid = chunk->subh.data_hdr->ppid; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { event->flags |= SCTP_UNORDERED; event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); } event->tsn = ntohl(chunk->subh.data_hdr->tsn); event->msg_flags |= chunk->chunk_hdr->flags; event->iif = sctp_chunk_iif(chunk); return event; fail_mark: kfree_skb(skb); fail: return NULL; } /* Create a partial delivery related event. 
* * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT * * When a receiver is engaged in a partial delivery of a * message this notification will be used to indicate * various events. */ struct sctp_ulpevent *sctp_ulpevent_make_pdapi( const struct sctp_association *asoc, __u32 indication, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_pdapi_event *pd; struct sk_buff *skb; event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event), MSG_NOTIFICATION, gfp); if (!event) goto fail; skb = sctp_event2skb(event); pd = (struct sctp_pdapi_event *) skb_put(skb, sizeof(struct sctp_pdapi_event)); /* pdapi_type * It should be SCTP_PARTIAL_DELIVERY_EVENT * * pdapi_flags: 16 bits (unsigned integer) * Currently unused. */ pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; pd->pdapi_flags = 0; /* pdapi_length: 32 bits (unsigned integer) * * This field is the total length of the notification data, including * the notification header. It will generally be sizeof (struct * sctp_pdapi_event). */ pd->pdapi_length = sizeof(struct sctp_pdapi_event); /* pdapi_indication: 32 bits (unsigned integer) * * This field holds the indication being sent to the application. */ pd->pdapi_indication = indication; /* pdapi_assoc_id: sizeof (sctp_assoc_t) * * The association id field, holds the identifier for the association. 
*/ sctp_ulpevent_set_owner(event, asoc); pd->pdapi_assoc_id = sctp_assoc2id(asoc); return event; fail: return NULL; } struct sctp_ulpevent *sctp_ulpevent_make_authkey( const struct sctp_association *asoc, __u16 key_id, __u32 indication, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_authkey_event *ak; struct sk_buff *skb; event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event), MSG_NOTIFICATION, gfp); if (!event) goto fail; skb = sctp_event2skb(event); ak = (struct sctp_authkey_event *) skb_put(skb, sizeof(struct sctp_authkey_event)); ak->auth_type = SCTP_AUTHENTICATION_EVENT; ak->auth_flags = 0; ak->auth_length = sizeof(struct sctp_authkey_event); ak->auth_keynumber = key_id; ak->auth_altkeynumber = 0; ak->auth_indication = indication; /* * The association id field, holds the identifier for the association. */ sctp_ulpevent_set_owner(event, asoc); ak->auth_assoc_id = sctp_assoc2id(asoc); return event; fail: return NULL; } /* * Socket Extensions for SCTP * 6.3.10. SCTP_SENDER_DRY_EVENT */ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( const struct sctp_association *asoc, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_sender_dry_event *sdry; struct sk_buff *skb; event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event), MSG_NOTIFICATION, gfp); if (!event) return NULL; skb = sctp_event2skb(event); sdry = (struct sctp_sender_dry_event *) skb_put(skb, sizeof(struct sctp_sender_dry_event)); sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT; sdry->sender_dry_flags = 0; sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event); sctp_ulpevent_set_owner(event, asoc); sdry->sender_dry_assoc_id = sctp_assoc2id(asoc); return event; } /* Return the notification type, assuming this is a notification * event. 
*/ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event) { union sctp_notification *notification; struct sk_buff *skb; skb = sctp_event2skb(event); notification = (union sctp_notification *) skb->data; return notification->sn_header.sn_type; } /* Copy out the sndrcvinfo into a msghdr. */ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *msghdr) { struct sctp_sndrcvinfo sinfo; if (sctp_ulpevent_is_notification(event)) return; /* Sockets API Extensions for SCTP * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) * * sinfo_stream: 16 bits (unsigned integer) * * For recvmsg() the SCTP stack places the message's stream number in * this value. */ sinfo.sinfo_stream = event->stream; /* sinfo_ssn: 16 bits (unsigned integer) * * For recvmsg() this value contains the stream sequence number that * the remote endpoint placed in the DATA chunk. For fragmented * messages this is the same number for all deliveries of the message * (if more than one recvmsg() is needed to read the message). */ sinfo.sinfo_ssn = event->ssn; /* sinfo_ppid: 32 bits (unsigned integer) * * In recvmsg() this value is * the same information that was passed by the upper layer in the peer * application. Please note that byte order issues are NOT accounted * for and this information is passed opaquely by the SCTP stack from * one end to the other. */ sinfo.sinfo_ppid = event->ppid; /* sinfo_flags: 16 bits (unsigned integer) * * This field may contain any of the following flags and is composed of * a bitwise OR of these values. * * recvmsg() flags: * * SCTP_UNORDERED - This flag is present when the message was sent * non-ordered. */ sinfo.sinfo_flags = event->flags; /* sinfo_tsn: 32 bit (unsigned integer) * * For the receiving side, this field holds a TSN that was * assigned to one of the SCTP Data Chunks. 
*/ sinfo.sinfo_tsn = event->tsn; /* sinfo_cumtsn: 32 bit (unsigned integer) * * This field will hold the current cumulative TSN as * known by the underlying SCTP layer. Note this field is * ignored when sending and only valid for a receive * operation when sinfo_flags are set to SCTP_UNORDERED. */ sinfo.sinfo_cumtsn = event->cumtsn; /* sinfo_assoc_id: sizeof (sctp_assoc_t) * * The association handle field, sinfo_assoc_id, holds the identifier * for the association announced in the COMMUNICATION_UP notification. * All notifications for a given association have the same identifier. * Ignored for one-to-one style sockets. */ sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); /* context value that is set via SCTP_CONTEXT socket option. */ sinfo.sinfo_context = event->asoc->default_rcv_context; /* These fields are not used while receiving. */ sinfo.sinfo_timetolive = 0; put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); } /* Do accounting for bytes received and hold a reference to the association * for each skb. */ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, struct sctp_association *asoc) { struct sk_buff *skb, *frag; skb = sctp_event2skb(event); /* Set the owner and charge rwnd for bytes received. */ sctp_ulpevent_set_owner(event, asoc); sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); if (!skb->data_len) return; /* Note: Not clearing the entire event struct as this is just a * fragment of the real event. However, we still need to do rwnd * accounting. * In general, the skb passed from IP can have only 1 level of * fragments. But we allow multiple levels of fragments. */ skb_walk_frags(skb, frag) sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); } /* Do accounting for bytes just read by user and release the references to * the association. 
*/ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) { struct sk_buff *skb, *frag; unsigned int len; /* Current stack structures assume that the rcv buffer is * per socket. For UDP style sockets this is not true as * multiple associations may be on a single UDP-style socket. * Use the local private area of the skb to track the owning * association. */ skb = sctp_event2skb(event); len = skb->len; if (!skb->data_len) goto done; /* Don't forget the fragments. */ skb_walk_frags(skb, frag) { /* NOTE: skb_shinfos are recursive. Although IP returns * skb's with only 1 level of fragments, SCTP reassembly can * increase the levels. */ sctp_ulpevent_release_frag_data(sctp_skb2event(frag)); } done: sctp_assoc_rwnd_increase(event->asoc, len); sctp_ulpevent_release_owner(event); } static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) { struct sk_buff *skb, *frag; skb = sctp_event2skb(event); if (!skb->data_len) goto done; /* Don't forget the fragments. */ skb_walk_frags(skb, frag) { /* NOTE: skb_shinfos are recursive. Although IP returns * skb's with only 1 level of fragments, SCTP reassembly can * increase the levels. */ sctp_ulpevent_release_frag_data(sctp_skb2event(frag)); } done: sctp_ulpevent_release_owner(event); } /* Free a ulpevent that has an owner. It includes releasing the reference * to the owner, updating the rwnd in case of a DATA event and freeing the * skb. */ void sctp_ulpevent_free(struct sctp_ulpevent *event) { if (sctp_ulpevent_is_notification(event)) sctp_ulpevent_release_owner(event); else sctp_ulpevent_release_data(event); kfree_skb(sctp_event2skb(event)); } /* Purge the skb lists holding ulpevents. 
*/ unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list) { struct sk_buff *skb; unsigned int data_unread = 0; while ((skb = skb_dequeue(list)) != NULL) { struct sctp_ulpevent *event = sctp_skb2event(skb); if (!sctp_ulpevent_is_notification(event)) data_unread += skb->len; sctp_ulpevent_free(event); } return data_unread; }
gpl-2.0
crazy-canux/linux
drivers/hwmon/adt7411.c
1481
9497
/* * Driver for the ADT7411 (I2C/SPI 8 channel 10 bit ADC & temperature-sensor) * * Copyright (C) 2008, 2010 Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * TODO: SPI, support for external temperature sensor * use power-down mode for suspend?, interrupt handling? */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/slab.h> #define ADT7411_REG_INT_TEMP_VDD_LSB 0x03 #define ADT7411_REG_EXT_TEMP_AIN14_LSB 0x04 #define ADT7411_REG_VDD_MSB 0x06 #define ADT7411_REG_INT_TEMP_MSB 0x07 #define ADT7411_REG_EXT_TEMP_AIN1_MSB 0x08 #define ADT7411_REG_CFG1 0x18 #define ADT7411_CFG1_START_MONITOR (1 << 0) #define ADT7411_REG_CFG2 0x19 #define ADT7411_CFG2_DISABLE_AVG (1 << 5) #define ADT7411_REG_CFG3 0x1a #define ADT7411_CFG3_ADC_CLK_225 (1 << 0) #define ADT7411_CFG3_REF_VDD (1 << 4) #define ADT7411_REG_DEVICE_ID 0x4d #define ADT7411_REG_MANUFACTURER_ID 0x4e #define ADT7411_DEVICE_ID 0x2 #define ADT7411_MANUFACTURER_ID 0x41 static const unsigned short normal_i2c[] = { 0x48, 0x4a, 0x4b, I2C_CLIENT_END }; struct adt7411_data { struct mutex device_lock; /* for "atomic" device accesses */ struct mutex update_lock; unsigned long next_update; int vref_cached; struct i2c_client *client; }; /* * When reading a register containing (up to 4) lsb, all associated * msb-registers get locked by the hardware. After _one_ of those msb is read, * _all_ are unlocked. In order to use this locking correctly, reading lsb/msb * is protected here with a mutex, too. 
*/ static int adt7411_read_10_bit(struct i2c_client *client, u8 lsb_reg, u8 msb_reg, u8 lsb_shift) { struct adt7411_data *data = i2c_get_clientdata(client); int val, tmp; mutex_lock(&data->device_lock); val = i2c_smbus_read_byte_data(client, lsb_reg); if (val < 0) goto exit_unlock; tmp = (val >> lsb_shift) & 3; val = i2c_smbus_read_byte_data(client, msb_reg); if (val >= 0) val = (val << 2) | tmp; exit_unlock: mutex_unlock(&data->device_lock); return val; } static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit, bool flag) { struct adt7411_data *data = i2c_get_clientdata(client); int ret, val; mutex_lock(&data->device_lock); ret = i2c_smbus_read_byte_data(client, reg); if (ret < 0) goto exit_unlock; if (flag) val = ret | bit; else val = ret & ~bit; ret = i2c_smbus_write_byte_data(client, reg, val); exit_unlock: mutex_unlock(&data->device_lock); return ret; } static ssize_t adt7411_show_vdd(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7411_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_VDD_MSB, 2); return ret < 0 ? ret : sprintf(buf, "%u\n", ret * 7000 / 1024); } static ssize_t adt7411_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct adt7411_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int val = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_INT_TEMP_MSB, 0); if (val < 0) return val; val = val & 0x200 ? 
val - 0x400 : val; /* 10 bit signed */ return sprintf(buf, "%d\n", val * 250); } static ssize_t adt7411_show_input(struct device *dev, struct device_attribute *attr, char *buf) { int nr = to_sensor_dev_attr(attr)->index; struct adt7411_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int val; u8 lsb_reg, lsb_shift; mutex_lock(&data->update_lock); if (time_after_eq(jiffies, data->next_update)) { val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3); if (val < 0) goto exit_unlock; if (val & ADT7411_CFG3_REF_VDD) { val = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_VDD_MSB, 2); if (val < 0) goto exit_unlock; data->vref_cached = val * 7000 / 1024; } else { data->vref_cached = 2250; } data->next_update = jiffies + HZ; } lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2); lsb_shift = 2 * (nr & 0x03); val = adt7411_read_10_bit(client, lsb_reg, ADT7411_REG_EXT_TEMP_AIN1_MSB + nr, lsb_shift); if (val < 0) goto exit_unlock; val = sprintf(buf, "%u\n", val * data->vref_cached / 1024); exit_unlock: mutex_unlock(&data->update_lock); return val; } static ssize_t adt7411_show_bit(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr); struct adt7411_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int ret = i2c_smbus_read_byte_data(client, attr2->index); return ret < 0 ? 
ret : sprintf(buf, "%u\n", !!(ret & attr2->nr)); } static ssize_t adt7411_set_bit(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *s_attr2 = to_sensor_dev_attr_2(attr); struct adt7411_data *data = dev_get_drvdata(dev); struct i2c_client *client = data->client; int ret; unsigned long flag; ret = kstrtoul(buf, 0, &flag); if (ret || flag > 1) return -EINVAL; ret = adt7411_modify_bit(client, s_attr2->index, s_attr2->nr, flag); /* force update */ mutex_lock(&data->update_lock); data->next_update = jiffies; mutex_unlock(&data->update_lock); return ret < 0 ? ret : count; } #define ADT7411_BIT_ATTR(__name, __reg, __bit) \ SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \ adt7411_set_bit, __bit, __reg) static DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL); static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1); static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, adt7411_show_input, NULL, 2); static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, adt7411_show_input, NULL, 3); static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, adt7411_show_input, NULL, 4); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, adt7411_show_input, NULL, 5); static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, adt7411_show_input, NULL, 6); static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, adt7411_show_input, NULL, 7); static ADT7411_BIT_ATTR(no_average, ADT7411_REG_CFG2, ADT7411_CFG2_DISABLE_AVG); static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_225); static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD); static struct attribute *adt7411_attrs[] = { &dev_attr_temp1_input.attr, &dev_attr_in0_input.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, 
&sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_no_average.dev_attr.attr, &sensor_dev_attr_fast_sampling.dev_attr.attr, &sensor_dev_attr_adc_ref_vdd.dev_attr.attr, NULL }; ATTRIBUTE_GROUPS(adt7411); static int adt7411_detect(struct i2c_client *client, struct i2c_board_info *info) { int val; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; val = i2c_smbus_read_byte_data(client, ADT7411_REG_MANUFACTURER_ID); if (val < 0 || val != ADT7411_MANUFACTURER_ID) { dev_dbg(&client->dev, "Wrong manufacturer ID. Got %d, expected %d\n", val, ADT7411_MANUFACTURER_ID); return -ENODEV; } val = i2c_smbus_read_byte_data(client, ADT7411_REG_DEVICE_ID); if (val < 0 || val != ADT7411_DEVICE_ID) { dev_dbg(&client->dev, "Wrong device ID. Got %d, expected %d\n", val, ADT7411_DEVICE_ID); return -ENODEV; } strlcpy(info->type, "adt7411", I2C_NAME_SIZE); return 0; } static int adt7411_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; struct adt7411_data *data; struct device *hwmon_dev; int ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); data->client = client; mutex_init(&data->device_lock); mutex_init(&data->update_lock); ret = adt7411_modify_bit(client, ADT7411_REG_CFG1, ADT7411_CFG1_START_MONITOR, 1); if (ret < 0) return ret; /* force update on first occasion */ data->next_update = jiffies; hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, adt7411_groups); return PTR_ERR_OR_ZERO(hwmon_dev); } static const struct i2c_device_id adt7411_id[] = { { "adt7411", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, adt7411_id); static struct i2c_driver adt7411_driver = { .driver = { .name = "adt7411", }, .probe = adt7411_probe, .id_table = adt7411_id, 
.detect = adt7411_detect, .address_list = normal_i2c, .class = I2C_CLASS_HWMON, }; module_i2c_driver(adt7411_driver); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de> and " "Wolfram Sang <w.sang@pengutronix.de>"); MODULE_DESCRIPTION("ADT7411 driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
hurrian/kernel_samsung_trelte
drivers/acpi/apei/einj.c
2249
20712
/* * APEI Error INJection support * * EINJ provides a hardware error injection mechanism, this is useful * for debugging and testing of other APEI and RAS features. * * For more information about EINJ, please refer to ACPI Specification * version 4.0, section 17.5. * * Copyright 2009-2010 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/nmi.h> #include <linux/delay.h> #include <acpi/acpi.h> #include "apei-internal.h" #define EINJ_PFX "EINJ: " #define SPIN_UNIT 100 /* 100ns */ /* Firmware should respond within 1 milliseconds */ #define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) /* * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action. 
*/ static int acpi5; struct set_error_type_with_address { u32 type; u32 vendor_extension; u32 flags; u32 apicid; u64 memory_address; u64 memory_address_range; u32 pcie_sbdf; }; enum { SETWA_FLAGS_APICID = 1, SETWA_FLAGS_MEM = 2, SETWA_FLAGS_PCIE_SBDF = 4, }; /* * Vendor extensions for platform specific operations */ struct vendor_error_type_extension { u32 length; u32 pcie_sbdf; u16 vendor_id; u16 device_id; u8 rev_id; u8 reserved[3]; }; static u32 notrigger; static u32 vendor_flags; static struct debugfs_blob_wrapper vendor_blob; static char vendor_dev[64]; /* * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the * EINJ table through an unpublished extension. Use with caution as * most will ignore the parameter and make their own choice of address * for error injection. This extension is used only if * param_extension module parameter is specified. */ struct einj_parameter { u64 type; u64 reserved1; u64 reserved2; u64 param1; u64 param2; }; #define EINJ_OP_BUSY 0x1 #define EINJ_STATUS_SUCCESS 0x0 #define EINJ_STATUS_FAIL 0x1 #define EINJ_STATUS_INVAL 0x2 #define EINJ_TAB_ENTRY(tab) \ ((struct acpi_whea_header *)((char *)(tab) + \ sizeof(struct acpi_table_einj))) static bool param_extension; module_param(param_extension, bool, 0); static struct acpi_table_einj *einj_tab; static struct apei_resources einj_resources; static struct apei_exec_ins_type einj_ins_type[] = { [ACPI_EINJ_READ_REGISTER] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_read_register, }, [ACPI_EINJ_READ_REGISTER_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_read_register_value, }, [ACPI_EINJ_WRITE_REGISTER] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_write_register, }, [ACPI_EINJ_WRITE_REGISTER_VALUE] = { .flags = APEI_EXEC_INS_ACCESS_REGISTER, .run = apei_exec_write_register_value, }, [ACPI_EINJ_NOOP] = { .flags = 0, .run = apei_exec_noop, }, }; /* * Prevent EINJ interpreter to run simultaneously, because the * corresponding 
firmware implementation may not work properly when * invoked simultaneously. */ static DEFINE_MUTEX(einj_mutex); static void *einj_param; static void einj_exec_ctx_init(struct apei_exec_context *ctx) { apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); } static int __einj_get_available_error_type(u32 *type) { struct apei_exec_context ctx; int rc; einj_exec_ctx_init(&ctx); rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE); if (rc) return rc; *type = apei_exec_ctx_get_output(&ctx); return 0; } /* Get error injection capabilities of the platform */ static int einj_get_available_error_type(u32 *type) { int rc; mutex_lock(&einj_mutex); rc = __einj_get_available_error_type(type); mutex_unlock(&einj_mutex); return rc; } static int einj_timedout(u64 *t) { if ((s64)*t < SPIN_UNIT) { pr_warning(FW_WARN EINJ_PFX "Firmware does not respond in time\n"); return 1; } *t -= SPIN_UNIT; ndelay(SPIN_UNIT); touch_nmi_watchdog(); return 0; } static void check_vendor_extension(u64 paddr, struct set_error_type_with_address *v5param) { int offset = v5param->vendor_extension; struct vendor_error_type_extension *v; u32 sbdf; if (!offset) return; v = acpi_os_map_memory(paddr + offset, sizeof(*v)); if (!v) return; sbdf = v->pcie_sbdf; sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", sbdf >> 24, (sbdf >> 16) & 0xff, (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, v->vendor_id, v->device_id, v->rev_id); acpi_os_unmap_memory(v, sizeof(*v)); } static void *einj_get_parameter_address(void) { int i; u64 paddrv4 = 0, paddrv5 = 0; struct acpi_whea_header *entry; entry = EINJ_TAB_ENTRY(einj_tab); for (i = 0; i < einj_tab->entries; i++) { if (entry->action == ACPI_EINJ_SET_ERROR_TYPE && entry->instruction == ACPI_EINJ_WRITE_REGISTER && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) memcpy(&paddrv4, &entry->register_region.address, sizeof(paddrv4)); if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS 
&& entry->instruction == ACPI_EINJ_WRITE_REGISTER && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) memcpy(&paddrv5, &entry->register_region.address, sizeof(paddrv5)); entry++; } if (paddrv5) { struct set_error_type_with_address *v5param; v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param)); if (v5param) { acpi5 = 1; check_vendor_extension(paddrv5, v5param); return v5param; } } if (param_extension && paddrv4) { struct einj_parameter *v4param; v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param)); if (!v4param) return NULL; if (v4param->reserved1 || v4param->reserved2) { acpi_os_unmap_memory(v4param, sizeof(*v4param)); return NULL; } return v4param; } return NULL; } /* do sanity check to trigger table */ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) { if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) return -EINVAL; if (trigger_tab->table_size > PAGE_SIZE || trigger_tab->table_size < trigger_tab->header_size) return -EINVAL; if (trigger_tab->entry_count != (trigger_tab->table_size - trigger_tab->header_size) / sizeof(struct acpi_einj_entry)) return -EINVAL; return 0; } static struct acpi_generic_address *einj_get_trigger_parameter_region( struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2) { int i; struct acpi_whea_header *entry; entry = (struct acpi_whea_header *) ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); for (i = 0; i < trigger_tab->entry_count; i++) { if (entry->action == ACPI_EINJ_TRIGGER_ERROR && entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE && entry->register_region.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && (entry->register_region.address & param2) == (param1 & param2)) return &entry->register_region; entry++; } return NULL; } /* Execute instructions in trigger error action table */ static int __einj_error_trigger(u64 trigger_paddr, u32 type, u64 param1, u64 param2) { struct acpi_einj_trigger *trigger_tab = NULL; struct apei_exec_context trigger_ctx; struct 
apei_resources trigger_resources; struct acpi_whea_header *trigger_entry; struct resource *r; u32 table_size; int rc = -EIO; struct acpi_generic_address *trigger_param_region = NULL; r = request_mem_region(trigger_paddr, sizeof(*trigger_tab), "APEI EINJ Trigger Table"); if (!r) { pr_err(EINJ_PFX "Can not request [mem %#010llx-%#010llx] for Trigger table\n", (unsigned long long)trigger_paddr, (unsigned long long)trigger_paddr + sizeof(*trigger_tab) - 1); goto out; } trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); if (!trigger_tab) { pr_err(EINJ_PFX "Failed to map trigger table!\n"); goto out_rel_header; } rc = einj_check_trigger_header(trigger_tab); if (rc) { pr_warning(FW_BUG EINJ_PFX "The trigger error action table is invalid\n"); goto out_rel_header; } /* No action structures in the TRIGGER_ERROR table, nothing to do */ if (!trigger_tab->entry_count) goto out_rel_header; rc = -EIO; table_size = trigger_tab->table_size; r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), table_size - sizeof(*trigger_tab), "APEI EINJ Trigger Table"); if (!r) { pr_err(EINJ_PFX "Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n", (unsigned long long)trigger_paddr + sizeof(*trigger_tab), (unsigned long long)trigger_paddr + table_size - 1); goto out_rel_header; } iounmap(trigger_tab); trigger_tab = ioremap_cache(trigger_paddr, table_size); if (!trigger_tab) { pr_err(EINJ_PFX "Failed to map trigger table!\n"); goto out_rel_entry; } trigger_entry = (struct acpi_whea_header *) ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); apei_resources_init(&trigger_resources); apei_exec_ctx_init(&trigger_ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), trigger_entry, trigger_tab->entry_count); rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); if (rc) goto out_fini; rc = apei_resources_sub(&trigger_resources, &einj_resources); if (rc) goto out_fini; /* * Some firmware will access target address specified in * param1 to trigger the 
error when injecting memory error. * This will cause resource conflict with regular memory. So * remove it from trigger table resources. */ if ((param_extension || acpi5) && (type & 0x0038) && param2) { struct apei_resources addr_resources; apei_resources_init(&addr_resources); trigger_param_region = einj_get_trigger_parameter_region( trigger_tab, param1, param2); if (trigger_param_region) { rc = apei_resources_add(&addr_resources, trigger_param_region->address, trigger_param_region->bit_width/8, true); if (rc) goto out_fini; rc = apei_resources_sub(&trigger_resources, &addr_resources); } apei_resources_fini(&addr_resources); if (rc) goto out_fini; } rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); if (rc) goto out_fini; rc = apei_exec_pre_map_gars(&trigger_ctx); if (rc) goto out_release; rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR); apei_exec_post_unmap_gars(&trigger_ctx); out_release: apei_resources_release(&trigger_resources); out_fini: apei_resources_fini(&trigger_resources); out_rel_entry: release_mem_region(trigger_paddr + sizeof(*trigger_tab), table_size - sizeof(*trigger_tab)); out_rel_header: release_mem_region(trigger_paddr, sizeof(*trigger_tab)); out: if (trigger_tab) iounmap(trigger_tab); return rc; } static int __einj_error_inject(u32 type, u64 param1, u64 param2) { struct apei_exec_context ctx; u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; int rc; einj_exec_ctx_init(&ctx); rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION); if (rc) return rc; apei_exec_ctx_set_input(&ctx, type); if (acpi5) { struct set_error_type_with_address *v5param = einj_param; v5param->type = type; if (type & 0x80000000) { switch (vendor_flags) { case SETWA_FLAGS_APICID: v5param->apicid = param1; break; case SETWA_FLAGS_MEM: v5param->memory_address = param1; v5param->memory_address_range = param2; break; case SETWA_FLAGS_PCIE_SBDF: v5param->pcie_sbdf = param1; break; } v5param->flags = vendor_flags; } else { switch (type) { case 
ACPI_EINJ_PROCESSOR_CORRECTABLE: case ACPI_EINJ_PROCESSOR_UNCORRECTABLE: case ACPI_EINJ_PROCESSOR_FATAL: v5param->apicid = param1; v5param->flags = SETWA_FLAGS_APICID; break; case ACPI_EINJ_MEMORY_CORRECTABLE: case ACPI_EINJ_MEMORY_UNCORRECTABLE: case ACPI_EINJ_MEMORY_FATAL: v5param->memory_address = param1; v5param->memory_address_range = param2; v5param->flags = SETWA_FLAGS_MEM; break; case ACPI_EINJ_PCIX_CORRECTABLE: case ACPI_EINJ_PCIX_UNCORRECTABLE: case ACPI_EINJ_PCIX_FATAL: v5param->pcie_sbdf = param1; v5param->flags = SETWA_FLAGS_PCIE_SBDF; break; } } } else { rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); if (rc) return rc; if (einj_param) { struct einj_parameter *v4param = einj_param; v4param->param1 = param1; v4param->param2 = param2; } } rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); if (rc) return rc; for (;;) { rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS); if (rc) return rc; val = apei_exec_ctx_get_output(&ctx); if (!(val & EINJ_OP_BUSY)) break; if (einj_timedout(&timeout)) return -EIO; } rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS); if (rc) return rc; val = apei_exec_ctx_get_output(&ctx); if (val != EINJ_STATUS_SUCCESS) return -EBUSY; rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE); if (rc) return rc; trigger_paddr = apei_exec_ctx_get_output(&ctx); if (notrigger == 0) { rc = __einj_error_trigger(trigger_paddr, type, param1, param2); if (rc) return rc; } rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); return rc; } /* Inject the specified hardware error */ static int einj_error_inject(u32 type, u64 param1, u64 param2) { int rc; mutex_lock(&einj_mutex); rc = __einj_error_inject(type, param1, param2); mutex_unlock(&einj_mutex); return rc; } static u32 error_type; static u64 error_param1; static u64 error_param2; static struct dentry *einj_debug_dir; static int available_error_type_show(struct seq_file *m, void *v) { int rc; u32 available_error_type = 0; rc = 
einj_get_available_error_type(&available_error_type); if (rc) return rc; if (available_error_type & 0x0001) seq_printf(m, "0x00000001\tProcessor Correctable\n"); if (available_error_type & 0x0002) seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n"); if (available_error_type & 0x0004) seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n"); if (available_error_type & 0x0008) seq_printf(m, "0x00000008\tMemory Correctable\n"); if (available_error_type & 0x0010) seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n"); if (available_error_type & 0x0020) seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n"); if (available_error_type & 0x0040) seq_printf(m, "0x00000040\tPCI Express Correctable\n"); if (available_error_type & 0x0080) seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n"); if (available_error_type & 0x0100) seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n"); if (available_error_type & 0x0200) seq_printf(m, "0x00000200\tPlatform Correctable\n"); if (available_error_type & 0x0400) seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n"); if (available_error_type & 0x0800) seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n"); return 0; } static int available_error_type_open(struct inode *inode, struct file *file) { return single_open(file, available_error_type_show, NULL); } static const struct file_operations available_error_type_fops = { .open = available_error_type_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int error_type_get(void *data, u64 *val) { *val = error_type; return 0; } static int error_type_set(void *data, u64 val) { int rc; u32 available_error_type = 0; u32 tval, vendor; /* * Vendor defined types have 0x80000000 bit set, and * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE */ vendor = val & 0x80000000; tval = val & 0x7fffffff; /* Only one error type can be specified */ if (tval & (tval - 1)) return -EINVAL; if (!vendor) { rc = 
einj_get_available_error_type(&available_error_type); if (rc) return rc; if (!(val & available_error_type)) return -EINVAL; } error_type = val; return 0; } DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get, error_type_set, "0x%llx\n"); static int error_inject_set(void *data, u64 val) { if (!error_type) return -EINVAL; return einj_error_inject(error_type, error_param1, error_param2); } DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, error_inject_set, "%llu\n"); static int einj_check_table(struct acpi_table_einj *einj_tab) { if ((einj_tab->header_length != (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) && (einj_tab->header_length != sizeof(struct acpi_table_einj))) return -EINVAL; if (einj_tab->header.length < sizeof(struct acpi_table_einj)) return -EINVAL; if (einj_tab->entries != (einj_tab->header.length - sizeof(struct acpi_table_einj)) / sizeof(struct acpi_einj_entry)) return -EINVAL; return 0; } static int __init einj_init(void) { int rc; acpi_status status; struct dentry *fentry; struct apei_exec_context ctx; if (acpi_disabled) return -ENODEV; status = acpi_get_table(ACPI_SIG_EINJ, 0, (struct acpi_table_header **)&einj_tab); if (status == AE_NOT_FOUND) return -ENODEV; else if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err(EINJ_PFX "Failed to get table, %s\n", msg); return -EINVAL; } rc = einj_check_table(einj_tab); if (rc) { pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n"); return -EINVAL; } rc = -ENOMEM; einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); if (!einj_debug_dir) goto err_cleanup; fentry = debugfs_create_file("available_error_type", S_IRUSR, einj_debug_dir, NULL, &available_error_type_fops); if (!fentry) goto err_cleanup; fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR, einj_debug_dir, NULL, &error_type_fops); if (!fentry) goto err_cleanup; fentry = debugfs_create_file("error_inject", S_IWUSR, einj_debug_dir, NULL, &error_inject_fops); if (!fentry) goto 
err_cleanup; apei_resources_init(&einj_resources); einj_exec_ctx_init(&ctx); rc = apei_exec_collect_resources(&ctx, &einj_resources); if (rc) goto err_fini; rc = apei_resources_request(&einj_resources, "APEI EINJ"); if (rc) goto err_fini; rc = apei_exec_pre_map_gars(&ctx); if (rc) goto err_release; einj_param = einj_get_parameter_address(); if ((param_extension || acpi5) && einj_param) { fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, einj_debug_dir, &error_param1); if (!fentry) goto err_unmap; fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, einj_debug_dir, &error_param2); if (!fentry) goto err_unmap; fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, einj_debug_dir, &notrigger); if (!fentry) goto err_unmap; } if (vendor_dev[0]) { vendor_blob.data = vendor_dev; vendor_blob.size = strlen(vendor_dev); fentry = debugfs_create_blob("vendor", S_IRUSR, einj_debug_dir, &vendor_blob); if (!fentry) goto err_unmap; fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, einj_debug_dir, &vendor_flags); if (!fentry) goto err_unmap; } pr_info(EINJ_PFX "Error INJection is initialized.\n"); return 0; err_unmap: if (einj_param) { acpi_size size = (acpi5) ? sizeof(struct set_error_type_with_address) : sizeof(struct einj_parameter); acpi_os_unmap_memory(einj_param, size); } apei_exec_post_unmap_gars(&ctx); err_release: apei_resources_release(&einj_resources); err_fini: apei_resources_fini(&einj_resources); err_cleanup: debugfs_remove_recursive(einj_debug_dir); return rc; } static void __exit einj_exit(void) { struct apei_exec_context ctx; if (einj_param) { acpi_size size = (acpi5) ? 
sizeof(struct set_error_type_with_address) : sizeof(struct einj_parameter); acpi_os_unmap_memory(einj_param, size); } einj_exec_ctx_init(&ctx); apei_exec_post_unmap_gars(&ctx); apei_resources_release(&einj_resources); apei_resources_fini(&einj_resources); debugfs_remove_recursive(einj_debug_dir); } module_init(einj_init); module_exit(einj_exit); MODULE_AUTHOR("Huang Ying"); MODULE_DESCRIPTION("APEI Error INJection support"); MODULE_LICENSE("GPL");
gpl-2.0